<?xml version="1.0" encoding="UTF-8"?>
<?xml-stylesheet href="/stylesheet.xsl" type="text/xsl"?>
<rss version="2.0" xmlns:dc="http://purl.org/dc/elements/1.1/" xmlns:atom="http://www.w3.org/2005/Atom" xmlns:sy="http://purl.org/rss/1.0/modules/syndication/" xmlns:content="http://purl.org/rss/1.0/modules/content/" xmlns:itunes="http://www.itunes.com/dtds/podcast-1.0.dtd" xmlns:podcast="https://podcastindex.org/namespace/1.0">
  <channel>
    <atom:link rel="self" type="application/rss+xml" href="https://feeds.transistor.fm/in-ai-we-trust" title="MP3 Audio"/>
    <atom:link rel="hub" href="https://pubsubhubbub.appspot.com/"/>
    <podcast:podping usesPodping="true"/>
    <title>In AI We Trust?</title>
    <generator>Transistor (https://transistor.fm)</generator>
    <itunes:new-feed-url>https://feeds.transistor.fm/in-ai-we-trust</itunes:new-feed-url>
    <description>In AI We Trust? is a podcast with Miriam Vogel of EqualAI that amplifies leaders and voices from across society who are defining, developing, and deploying AI best practices across government, industry, academia, and civil society. In each episode, Miriam sits down with guests to demystify the technology, discuss the importance of AI governance and AI literacy, and spark conversations about people’s hopes, fears, and opportunities in an AI-shaped future.</description>
    <copyright>© 2026 Miriam Vogel</copyright>
    <podcast:guid>e8c5d92a-32bc-5943-b149-d3234a13959c</podcast:guid>
    <podcast:locked owner="Miriam.Vogel@equalai.org">no</podcast:locked>
    <podcast:trailer pubdate="Tue, 02 Mar 2021 14:31:45 -0500" url="https://media.transistor.fm/d07b60b9/780d7f19.mp3" length="3306645" type="audio/mpeg">Welcome to In AI We Trust?</podcast:trailer>
    <language>en</language>
    <pubDate>Thu, 30 Apr 2026 11:16:22 -0400</pubDate>
    <lastBuildDate>Thu, 30 Apr 2026 11:17:13 -0400</lastBuildDate>
    <link>https://equalai.transistor.fm/</link>
    <image>
      <url>https://img.transistorcdn.com/TzVlXT9RcfvcvqUoHaRRn_iCY65L7b9WBHZBi-5k3-M/rs:fill:0:0:1/w:1400/h:1400/q:60/mb:500000/aHR0cHM6Ly9pbWct/dXBsb2FkLXByb2R1/Y3Rpb24udHJhbnNp/c3Rvci5mbS9zaG93/LzE5MDc2LzE2MTUz/OTEyODAtYXJ0d29y/ay5qcGc.jpg</url>
      <title>In AI We Trust?</title>
      <link>https://equalai.transistor.fm/</link>
    </image>
    <itunes:category text="Technology"/>
    <itunes:category text="News">
      <itunes:category text="Tech News"/>
    </itunes:category>
    <itunes:type>episodic</itunes:type>
    <itunes:author>Miriam Vogel</itunes:author>
    <itunes:image href="https://img.transistorcdn.com/TzVlXT9RcfvcvqUoHaRRn_iCY65L7b9WBHZBi-5k3-M/rs:fill:0:0:1/w:1400/h:1400/q:60/mb:500000/aHR0cHM6Ly9pbWct/dXBsb2FkLXByb2R1/Y3Rpb24udHJhbnNp/c3Rvci5mbS9zaG93/LzE5MDc2LzE2MTUz/OTEyODAtYXJ0d29y/ay5qcGc.jpg"/>
    <itunes:summary>In AI We Trust? is a podcast with Miriam Vogel of EqualAI that amplifies leaders and voices from across society who are defining, developing, and deploying AI best practices across government, industry, academia, and civil society. In each episode, Miriam sits down with guests to demystify the technology, discuss the importance of AI governance and AI literacy, and spark conversations about people’s hopes, fears, and opportunities in an AI-shaped future.</itunes:summary>
    <itunes:subtitle>In AI We Trust?</itunes:subtitle>
    <itunes:keywords>technology, AI Governance, AI, Tech, Artificial Intelligence, AI literacy, AI Policy</itunes:keywords>
    <itunes:owner>
      <itunes:name>Miriam Vogel</itunes:name>
    </itunes:owner>
    <itunes:complete>No</itunes:complete>
    <itunes:explicit>No</itunes:explicit>
    <item>
      <title>Inside the Department of Labor's Plan to Make American Workers AI-Ready</title>
      <itunes:episode>124</itunes:episode>
      <podcast:episode>124</podcast:episode>
      <itunes:title>Inside the Department of Labor's Plan to Make American Workers AI-Ready</itunes:title>
      <itunes:episodeType>full</itunes:episodeType>
      <guid isPermaLink="false">d1499be7-301d-4b54-9b24-1324e5b1391c</guid>
      <link>https://share.transistor.fm/s/822d80ca</link>
      <description>
        <![CDATA[<p>In this episode, Miriam Vogel, President and CEO of EqualAI, sits down with Taylor Stockton, Chief Innovation Officer at the U.S. Department of Labor, to discuss what the federal government is doing to prepare workers for the AI economy. Stockton walks through the DOL's AI Literacy Framework, a text message-based literacy course designed to reach workers where they are, modernized apprenticeships with embedded AI skills, and the new AI Workforce Hub — a real-time resource tracking how AI is transforming jobs across sectors. Taylor shares his favorite use cases and how the federal government is adhering to the Executive Order to increase its own AI use.</p>]]>
      </description>
      <content:encoded>
        <![CDATA[<p>In this episode, Miriam Vogel, President and CEO of EqualAI, sits down with Taylor Stockton, Chief Innovation Officer at the U.S. Department of Labor, to discuss what the federal government is doing to prepare workers for the AI economy. Stockton walks through the DOL's AI Literacy Framework, a text message-based literacy course designed to reach workers where they are, modernized apprenticeships with embedded AI skills, and the new AI Workforce Hub — a real-time resource tracking how AI is transforming jobs across sectors. Taylor shares his favorite use cases and how the federal government is adhering to the Executive Order to increase its own AI use.</p>]]>
      </content:encoded>
      <pubDate>Thu, 30 Apr 2026 11:16:18 -0400</pubDate>
      <author>Miriam Vogel</author>
      <enclosure url="https://media.transistor.fm/822d80ca/c83c1e88.mp3" length="49564294" type="audio/mpeg"/>
      <itunes:author>Miriam Vogel</itunes:author>
      <itunes:duration>2063</itunes:duration>
      <itunes:summary>
        <![CDATA[<p>In this episode, Miriam Vogel, President and CEO of EqualAI, sits down with Taylor Stockton, Chief Innovation Officer at the U.S. Department of Labor, to discuss what the federal government is doing to prepare workers for the AI economy. Stockton walks through the DOL's AI Literacy Framework, a text message-based literacy course designed to reach workers where they are, modernized apprenticeships with embedded AI skills, and the new AI Workforce Hub — a real-time resource tracking how AI is transforming jobs across sectors. Taylor shares his favorite use cases and how the federal government is adhering to the Executive Order to increase its own AI use.</p>]]>
      </itunes:summary>
      <itunes:keywords>technology, AI Governance, AI, Tech, Artificial Intelligence, AI literacy, AI Policy</itunes:keywords>
      <itunes:explicit>No</itunes:explicit>
    </item>
    <item>
      <title>Learning at Scale: Live from the ASU+GSV Summit with Deborah Quazzo</title>
      <itunes:episode>123</itunes:episode>
      <podcast:episode>123</podcast:episode>
      <itunes:title>Learning at Scale: Live from the ASU+GSV Summit with Deborah Quazzo</itunes:title>
      <itunes:episodeType>full</itunes:episodeType>
      <guid isPermaLink="false">1757cd38-4f7c-4d90-b742-8781c35dd375</guid>
      <link>https://share.transistor.fm/s/5ca597cf</link>
      <description>
        <![CDATA[<p>In this special episode of <em>In AI We Trust?</em>, Miriam Vogel sits down with Deborah Quazzo, Managing Partner of GSV Ventures and co-founder of ASU+GSV Summit, one of the most influential gatherings at the intersection of education and innovation. Recorded as the Summit unfolds, Quazzo offers a rare behind-the-scenes look at how ASU+GSV has grown into a "multi-dimensional marketplace" — one that puts philanthropists, commercial investors, K-12 superintendents, and university presidents in the same room to tackle education's biggest challenges together. She makes a compelling case for AI's potential to create personalized learning pathways, close persistent skills gaps, and drive students toward mastery at scale — and why getting the governance right is what makes all of it possible.</p>]]>
      </description>
      <content:encoded>
        <![CDATA[<p>In this special episode of <em>In AI We Trust?</em>, Miriam Vogel sits down with Deborah Quazzo, Managing Partner of GSV Ventures and co-founder of ASU+GSV Summit, one of the most influential gatherings at the intersection of education and innovation. Recorded as the Summit unfolds, Quazzo offers a rare behind-the-scenes look at how ASU+GSV has grown into a "multi-dimensional marketplace" — one that puts philanthropists, commercial investors, K-12 superintendents, and university presidents in the same room to tackle education's biggest challenges together. She makes a compelling case for AI's potential to create personalized learning pathways, close persistent skills gaps, and drive students toward mastery at scale — and why getting the governance right is what makes all of it possible.</p>]]>
      </content:encoded>
      <pubDate>Mon, 13 Apr 2026 13:23:12 -0400</pubDate>
      <author>Miriam Vogel</author>
      <enclosure url="https://media.transistor.fm/5ca597cf/92ab63cf.mp3" length="60117207" type="audio/mpeg"/>
      <itunes:author>Miriam Vogel</itunes:author>
      <itunes:duration>2504</itunes:duration>
      <itunes:summary>
        <![CDATA[<p>In this special episode of <em>In AI We Trust?</em>, Miriam Vogel sits down with Deborah Quazzo, Managing Partner of GSV Ventures and co-founder of ASU+GSV Summit, one of the most influential gatherings at the intersection of education and innovation. Recorded as the Summit unfolds, Quazzo offers a rare behind-the-scenes look at how ASU+GSV has grown into a "multi-dimensional marketplace" — one that puts philanthropists, commercial investors, K-12 superintendents, and university presidents in the same room to tackle education's biggest challenges together. She makes a compelling case for AI's potential to create personalized learning pathways, close persistent skills gaps, and drive students toward mastery at scale — and why getting the governance right is what makes all of it possible.</p>]]>
      </itunes:summary>
      <itunes:keywords>technology, AI Governance, AI, Tech, Artificial Intelligence, AI literacy, AI Policy</itunes:keywords>
      <itunes:explicit>No</itunes:explicit>
    </item>
    <item>
      <title>Robbie Torney on Common Sense Media’s 2026 Summit (this week), Impact of AI on Children, and New Report on Kids and Families</title>
      <itunes:episode>122</itunes:episode>
      <podcast:episode>122</podcast:episode>
      <itunes:title>Robbie Torney on Common Sense Media’s 2026 Summit (this week), Impact of AI on Children, and New Report on Kids and Families</itunes:title>
      <itunes:episodeType>full</itunes:episodeType>
      <guid isPermaLink="false">94ed18cd-03c0-455a-9dd0-57e9031c8dc0</guid>
      <link>https://share.transistor.fm/s/21a64d5e</link>
      <description>
        <![CDATA[<p>On this episode of <em>In AI We Trust?</em>, EqualAI President &amp; CEO Miriam Vogel and EqualAI Senior Advisor Nuala O’Connor co-host a conversation with Robbie Torney, Head of AI &amp; Digital Assessments at Common Sense Media. They discuss the third annual Common Sense Media’s Summit on Kids and Families (March 23-24), the premier cross-sector gathering on child wellbeing in the digital age. Robbie Torney shares his valuable insights on how AI tools are affecting children’s learning and development, mental health, and growing up in a world that is hyperconnected. They dig in on topics impacting kids, families, and educators, including AI companions, chatbots, and AI toys, the importance of AI safety features and parental controls, gaps in AI usage among boys and girls, and how to protect kids in the AI era. </p>]]>
      </description>
      <content:encoded>
        <![CDATA[<p>On this episode of <em>In AI We Trust?</em>, EqualAI President &amp; CEO Miriam Vogel and EqualAI Senior Advisor Nuala O’Connor co-host a conversation with Robbie Torney, Head of AI &amp; Digital Assessments at Common Sense Media. They discuss the third annual Common Sense Media’s Summit on Kids and Families (March 23-24), the premier cross-sector gathering on child wellbeing in the digital age. Robbie Torney shares his valuable insights on how AI tools are affecting children’s learning and development, mental health, and growing up in a world that is hyperconnected. They dig in on topics impacting kids, families, and educators, including AI companions, chatbots, and AI toys, the importance of AI safety features and parental controls, gaps in AI usage among boys and girls, and how to protect kids in the AI era. </p>]]>
      </content:encoded>
      <pubDate>Tue, 24 Mar 2026 10:02:23 -0400</pubDate>
      <author>Miriam Vogel</author>
      <enclosure url="https://media.transistor.fm/21a64d5e/91a80341.mp3" length="76067640" type="audio/mpeg"/>
      <itunes:author>Miriam Vogel</itunes:author>
      <itunes:duration>3168</itunes:duration>
      <itunes:summary>
        <![CDATA[<p>On this episode of <em>In AI We Trust?</em>, EqualAI President &amp; CEO Miriam Vogel and EqualAI Senior Advisor Nuala O’Connor co-host a conversation with Robbie Torney, Head of AI &amp; Digital Assessments at Common Sense Media. They discuss the third annual Common Sense Media’s Summit on Kids and Families (March 23-24), the premier cross-sector gathering on child wellbeing in the digital age. Robbie Torney shares his valuable insights on how AI tools are affecting children’s learning and development, mental health, and growing up in a world that is hyperconnected. They dig in on topics impacting kids, families, and educators, including AI companions, chatbots, and AI toys, the importance of AI safety features and parental controls, gaps in AI usage among boys and girls, and how to protect kids in the AI era. </p>]]>
      </itunes:summary>
      <itunes:keywords>technology, AI Governance, AI, Tech, Artificial Intelligence, AI literacy, AI Policy</itunes:keywords>
      <itunes:explicit>No</itunes:explicit>
    </item>
    <item>
      <title>AI Literacy Series Ep. 15: Dr. Chris Howard, Executive Vice President and Chief Operating Officer of Arizona State University</title>
      <itunes:episode>121</itunes:episode>
      <podcast:episode>121</podcast:episode>
      <itunes:title>AI Literacy Series Ep. 15: Dr. Chris Howard, Executive Vice President and Chief Operating Officer of Arizona State University</itunes:title>
      <itunes:episodeType>full</itunes:episodeType>
      <guid isPermaLink="false">0e2e5cc5-285e-4e5f-b43e-79b423a5e0bc</guid>
      <link>https://share.transistor.fm/s/9159d8d7</link>
      <description>
        <![CDATA[<p>On this episode of <em>In AI We Trust?</em>, EqualAI President and CEO Miriam Vogel sat down for a conversation with Dr. Chris Howard, Executive Vice President and Chief Operating Officer of Arizona State University (ASU). They reflected on EqualAI’s recent AI Literacy Initiative event with ASU in Scottsdale, Arizona, featuring ASU President Michael Crow, Taylor Stockton, CIO of DOL, ASU Chief Information Officer Lev Gonick, Dean of ASU’s Walter Cronkite School of Journalism Battinto Batts, and Dean of ASU’s Mary Lou Fulton Teachers College Carole Basile, among other notable speakers. Dr. Howard and Miriam Vogel dove into ASU leadership on AI and education, including how ASU is using AI to help build human capacity and create impact for students and learners as they prepare to enter the workforce, and how AI fits in with ASU’s charter statement.</p>]]>
      </description>
      <content:encoded>
        <![CDATA[<p>On this episode of <em>In AI We Trust?</em>, EqualAI President and CEO Miriam Vogel sat down for a conversation with Dr. Chris Howard, Executive Vice President and Chief Operating Officer of Arizona State University (ASU). They reflected on EqualAI’s recent AI Literacy Initiative event with ASU in Scottsdale, Arizona, featuring ASU President Michael Crow, Taylor Stockton, CIO of DOL, ASU Chief Information Officer Lev Gonick, Dean of ASU’s Walter Cronkite School of Journalism Battinto Batts, and Dean of ASU’s Mary Lou Fulton Teachers College Carole Basile, among other notable speakers. Dr. Howard and Miriam Vogel dove into ASU leadership on AI and education, including how ASU is using AI to help build human capacity and create impact for students and learners as they prepare to enter the workforce, and how AI fits in with ASU’s charter statement.</p>]]>
      </content:encoded>
      <pubDate>Wed, 11 Mar 2026 15:54:13 -0400</pubDate>
      <author>Miriam Vogel</author>
      <enclosure url="https://media.transistor.fm/9159d8d7/1e3cee31.mp3" length="59689280" type="audio/mpeg"/>
      <itunes:author>Miriam Vogel</itunes:author>
      <itunes:duration>2484</itunes:duration>
      <itunes:summary>
        <![CDATA[<p>On this episode of <em>In AI We Trust?</em>, EqualAI President and CEO Miriam Vogel sat down for a conversation with Dr. Chris Howard, Executive Vice President and Chief Operating Officer of Arizona State University (ASU). They reflected on EqualAI’s recent AI Literacy Initiative event with ASU in Scottsdale, Arizona, featuring ASU President Michael Crow, Taylor Stockton, CIO of DOL, ASU Chief Information Officer Lev Gonick, Dean of ASU’s Walter Cronkite School of Journalism Battinto Batts, and Dean of ASU’s Mary Lou Fulton Teachers College Carole Basile, among other notable speakers. Dr. Howard and Miriam Vogel dove into ASU leadership on AI and education, including how ASU is using AI to help build human capacity and create impact for students and learners as they prepare to enter the workforce, and how AI fits in with ASU’s charter statement.</p>]]>
      </itunes:summary>
      <itunes:keywords>technology, AI Governance, AI, Tech, Artificial Intelligence, AI literacy, AI Policy</itunes:keywords>
      <itunes:explicit>No</itunes:explicit>
    </item>
    <item>
      <title>Maintaining Optimism in a World of AI Opportunity and Risk - John Bailey, Nonresident Senior Fellow at American Enterprise Institute</title>
      <itunes:episode>120</itunes:episode>
      <podcast:episode>120</podcast:episode>
      <itunes:title>Maintaining Optimism in a World of AI Opportunity and Risk - John Bailey, Nonresident Senior Fellow at American Enterprise Institute</itunes:title>
      <itunes:episodeType>full</itunes:episodeType>
      <guid isPermaLink="false">49ea51fb-ca34-43ae-ae45-5f322dd4120b</guid>
      <link>https://share.transistor.fm/s/b9e0566c</link>
      <description>
        <![CDATA[<p>On this episode of <em>In AI We Trust?</em>, EqualAI President and CEO Miriam Vogel and EqualAI Senior Advisor Nuala O’Connor sit down with John Bailey, Nonresident Senior Fellow at American Enterprise Institute, where he focuses on the intersection of technology and innovation in areas such as education and the workforce, and how to leverage the benefits of emerging technologies while minimizing the risks. John shares his thoughts on the challenges and opportunities that AI presents, including in education, healthcare/mental health and the workforce, and why he is optimistic about the future of AI and new possibilities being opened up by generative AI and agentic AI. He also shares his thoughts on the importance of AI literacy and ensuring people’s trust in AI, as well as ways that AI frontier labs, educators and others can help guide a path toward safe and beneficial AI adoption.</p>]]>
      </description>
      <content:encoded>
        <![CDATA[<p>On this episode of <em>In AI We Trust?</em>, EqualAI President and CEO Miriam Vogel and EqualAI Senior Advisor Nuala O’Connor sit down with John Bailey, Nonresident Senior Fellow at American Enterprise Institute, where he focuses on the intersection of technology and innovation in areas such as education and the workforce, and how to leverage the benefits of emerging technologies while minimizing the risks. John shares his thoughts on the challenges and opportunities that AI presents, including in education, healthcare/mental health and the workforce, and why he is optimistic about the future of AI and new possibilities being opened up by generative AI and agentic AI. He also shares his thoughts on the importance of AI literacy and ensuring people’s trust in AI, as well as ways that AI frontier labs, educators and others can help guide a path toward safe and beneficial AI adoption.</p>]]>
      </content:encoded>
      <pubDate>Wed, 04 Feb 2026 09:50:51 -0500</pubDate>
      <author>Miriam Vogel</author>
      <enclosure url="https://media.transistor.fm/b9e0566c/5b8b0777.mp3" length="68594126" type="audio/mpeg"/>
      <itunes:author>Miriam Vogel</itunes:author>
      <itunes:duration>2855</itunes:duration>
      <itunes:summary>
        <![CDATA[<p>On this episode of <em>In AI We Trust?</em>, EqualAI President and CEO Miriam Vogel and EqualAI Senior Advisor Nuala O’Connor sit down with John Bailey, Nonresident Senior Fellow at American Enterprise Institute, where he focuses on the intersection of technology and innovation in areas such as education and the workforce, and how to leverage the benefits of emerging technologies while minimizing the risks. John shares his thoughts on the challenges and opportunities that AI presents, including in education, healthcare/mental health and the workforce, and why he is optimistic about the future of AI and new possibilities being opened up by generative AI and agentic AI. He also shares his thoughts on the importance of AI literacy and ensuring people’s trust in AI, as well as ways that AI frontier labs, educators and others can help guide a path toward safe and beneficial AI adoption.</p>]]>
      </itunes:summary>
      <itunes:keywords>technology, AI Governance, AI, Tech, Artificial Intelligence, AI literacy, AI Policy</itunes:keywords>
      <itunes:explicit>No</itunes:explicit>
    </item>
    <item>
      <title>AI Governance and the Role of Boards of Directors f/t Joshua Geltzer and Jessica Lewis of WilmerHale</title>
      <itunes:episode>119</itunes:episode>
      <podcast:episode>119</podcast:episode>
      <itunes:title>AI Governance and the Role of Boards of Directors f/t Joshua Geltzer and Jessica Lewis of WilmerHale</itunes:title>
      <itunes:episodeType>full</itunes:episodeType>
      <guid isPermaLink="false">d467b908-673d-4722-82ff-65f5a36059dd</guid>
      <link>https://share.transistor.fm/s/00138463</link>
      <description>
        <![CDATA[<p>EqualAI is thrilled to share the publication of our <a href="https://www.equalai.org/ai-governance-playbook-for-boards/">AI Governance Playbook for Boards</a>—a practical guide to help Boards of Directors responsibly oversee AI while balancing risk management, value creation, and emerging legal and compliance obligations. On this episode of <em>In AI We Trust?, </em>EqualAI President and CEO Miriam Vogel sits down with our partners at WilmerHale, Jessica Lewis and Joshua Geltzer, to discuss the newly released Playbook, and Jessica and Joshua break down why Board-level engagement in AI governance is no longer optional—but both a best practice and, increasingly, a legal requirement. </p>]]>
      </description>
      <content:encoded>
        <![CDATA[<p>EqualAI is thrilled to share the publication of our <a href="https://www.equalai.org/ai-governance-playbook-for-boards/">AI Governance Playbook for Boards</a>—a practical guide to help Boards of Directors responsibly oversee AI while balancing risk management, value creation, and emerging legal and compliance obligations. On this episode of <em>In AI We Trust?, </em>EqualAI President and CEO Miriam Vogel sits down with our partners at WilmerHale, Jessica Lewis and Joshua Geltzer, to discuss the newly released Playbook, and Jessica and Joshua break down why Board-level engagement in AI governance is no longer optional—but both a best practice and, increasingly, a legal requirement. </p>]]>
      </content:encoded>
      <pubDate>Thu, 22 Jan 2026 09:43:16 -0500</pubDate>
      <author>Miriam Vogel</author>
      <enclosure url="https://media.transistor.fm/00138463/4cacce31.mp3" length="40002897" type="audio/mpeg"/>
      <itunes:author>Miriam Vogel</itunes:author>
      <itunes:duration>2857</itunes:duration>
      <itunes:summary>
        <![CDATA[<p>EqualAI is thrilled to share the publication of our <a href="https://www.equalai.org/ai-governance-playbook-for-boards/">AI Governance Playbook for Boards</a>—a practical guide to help Boards of Directors responsibly oversee AI while balancing risk management, value creation, and emerging legal and compliance obligations. On this episode of <em>In AI We Trust?, </em>EqualAI President and CEO Miriam Vogel sits down with our partners at WilmerHale, Jessica Lewis and Joshua Geltzer, to discuss the newly released Playbook, and Jessica and Joshua break down why Board-level engagement in AI governance is no longer optional—but both a best practice and, increasingly, a legal requirement. </p>]]>
      </itunes:summary>
      <itunes:keywords>technology, AI Governance, AI, Tech, Artificial Intelligence, AI literacy, AI Policy</itunes:keywords>
      <itunes:explicit>No</itunes:explicit>
    </item>
    <item>
      <title>EqualAI End of Year Podcast Episode + Governing the Machine Excerpt</title>
      <itunes:episode>118</itunes:episode>
      <podcast:episode>118</podcast:episode>
      <itunes:title>EqualAI End of Year Podcast Episode + Governing the Machine Excerpt</itunes:title>
      <itunes:episodeType>full</itunes:episodeType>
      <guid isPermaLink="false">b28a0581-4110-4795-8172-13ef8c885a34</guid>
      <link>https://share.transistor.fm/s/f37c6097</link>
      <description>
        <![CDATA[<p>In this episode of <em>In AI We Trust?</em>, EqualAI President and CEO Miriam Vogel offers a special sneak peek into <a href="https://linktr.ee/governingthemachine"><em>Governing the Machine</em></a>, the book she co-authored and released this year, which is available online and in-stores and on <a href="https://www.audible.com/pd/Governing-the-Machine-Audiobook/B0FTHB4319?source_code=ASSGB149080119000H&amp;share_location=pdp">audible</a> (with a familiar voice). <strong>In this special episode, Miriam Vogel reflects on a pivotal year for AI—and what it means for all of us.</strong> She shares key moments from 2025, including highlights on the work advanced alongside partners across industry, government, and civil society, and why this growing community matters as AI reshapes how we work, learn, and govern. The episode concludes with the exclusive excerpt from a chapter in <em>Governing the Machine</em>, offering a deeper look at how we got here—and how we move forward with clarity and trust. We’re excited to continue the conversation in 2026 with new guests, expanded AI literacy discussions, and timely insights you won’t want to miss.</p>]]>
      </description>
      <content:encoded>
        <![CDATA[<p>In this episode of <em>In AI We Trust?</em>, EqualAI President and CEO Miriam Vogel offers a special sneak peek into <a href="https://linktr.ee/governingthemachine"><em>Governing the Machine</em></a>, the book she co-authored and released this year, which is available online and in-stores and on <a href="https://www.audible.com/pd/Governing-the-Machine-Audiobook/B0FTHB4319?source_code=ASSGB149080119000H&amp;share_location=pdp">audible</a> (with a familiar voice). <strong>In this special episode, Miriam Vogel reflects on a pivotal year for AI—and what it means for all of us.</strong> She shares key moments from 2025, including highlights on the work advanced alongside partners across industry, government, and civil society, and why this growing community matters as AI reshapes how we work, learn, and govern. The episode concludes with the exclusive excerpt from a chapter in <em>Governing the Machine</em>, offering a deeper look at how we got here—and how we move forward with clarity and trust. We’re excited to continue the conversation in 2026 with new guests, expanded AI literacy discussions, and timely insights you won’t want to miss.</p>]]>
      </content:encoded>
      <pubDate>Thu, 18 Dec 2025 17:36:01 -0500</pubDate>
      <author>Miriam Vogel</author>
      <enclosure url="https://media.transistor.fm/f37c6097/bcb5d169.mp3" length="26624241" type="audio/mpeg"/>
      <itunes:author>Miriam Vogel</itunes:author>
      <itunes:duration>1894</itunes:duration>
      <itunes:summary>
        <![CDATA[<p>In this episode of <em>In AI We Trust?</em>, EqualAI President and CEO Miriam Vogel offers a special sneak peek into <a href="https://linktr.ee/governingthemachine"><em>Governing the Machine</em></a>, the book she co-authored and released this year, which is available online and in-stores and on <a href="https://www.audible.com/pd/Governing-the-Machine-Audiobook/B0FTHB4319?source_code=ASSGB149080119000H&amp;share_location=pdp">audible</a> (with a familiar voice). <strong>In this special episode, Miriam Vogel reflects on a pivotal year for AI—and what it means for all of us.</strong> She shares key moments from 2025, including highlights on the work advanced alongside partners across industry, government, and civil society, and why this growing community matters as AI reshapes how we work, learn, and govern. The episode concludes with the exclusive excerpt from a chapter in <em>Governing the Machine</em>, offering a deeper look at how we got here—and how we move forward with clarity and trust. We’re excited to continue the conversation in 2026 with new guests, expanded AI literacy discussions, and timely insights you won’t want to miss.</p>]]>
      </itunes:summary>
      <itunes:keywords>technology, AI Governance, AI, Tech, Artificial Intelligence, AI literacy, AI Policy</itunes:keywords>
      <itunes:explicit>No</itunes:explicit>
    </item>
    <item>
      <title>AI Literacy Series Ep. 14: Chattanooga Mayor Tim Kelly and Heartland Forward President Angie Cooper</title>
      <itunes:episode>117</itunes:episode>
      <podcast:episode>117</podcast:episode>
      <itunes:title>AI Literacy Series Ep. 14: Chattanooga Mayor Tim Kelly and Heartland Forward President Angie Cooper</itunes:title>
      <itunes:episodeType>full</itunes:episodeType>
      <guid isPermaLink="false">15ceb146-bc61-429f-b4cc-2a4391b664bf</guid>
      <link>https://share.transistor.fm/s/648adb48</link>
      <description>
        <![CDATA[<p>From November 11-12, 2025, EqualAI and Heartland Forward organized a convening with Mayor Tim Kelly of Chattanooga, Tennessee focused on AI in education and the workforce. The event brought together approximately 60 leaders from the City of Chattanooga and across the State of Tennessee to examine how AI is reshaping the education and jobs landscape and discuss the risks and opportunities of integrating AI into schools and workplaces. In Chattanooga, EqualAI President and CEO Miriam Vogel sat down with Mayor Kelly and Heartland Forward President Angie Cooper for a podcast recording of <a href="https://equalai.transistor.fm/"><em>In AI We Trust?</em></a><em> </em>During the discussion, Mayor Kelly discussed real-world examples of how the City of Chattanooga is using AI in practical ways to help improve everyday quality of life for city residents, and Angie shared about how her organization, a “think-and-do-tank,” has launched the first-ever <a href="https://heartlandforward.org/news/heartland-forward-launches-first-ever-heartland-ai-caucus/">Heartland AI Caucus</a>. Learn about all this and much more in this latest episode!</p>]]>
      </description>
      <content:encoded>
        <![CDATA[<p>From November 11-12, 2025, EqualAI and Heartland Forward organized a convening with Mayor Tim Kelly of Chattanooga, Tennessee focused on AI in education and the workforce. The event brought together approximately 60 leaders from the City of Chattanooga and across the State of Tennessee to examine how AI is reshaping the education and jobs landscape and discuss the risks and opportunities of integrating AI into schools and workplaces. In Chattanooga, EqualAI President and CEO Miriam Vogel sat down with Mayor Kelly and Heartland Forward President Angie Cooper for a podcast recording of <a href="https://equalai.transistor.fm/"><em>In AI We Trust?</em></a><em> </em>During the discussion, Mayor Kelly discussed real-world examples of how the City of Chattanooga is using AI in practical ways to help improve everyday quality of life for city residents, and Angie shared about how her organization, a “think-and-do-tank,” has launched the first-ever <a href="https://heartlandforward.org/news/heartland-forward-launches-first-ever-heartland-ai-caucus/">Heartland AI Caucus</a>. Learn about all this and much more in this latest episode!</p>]]>
      </content:encoded>
      <pubDate>Tue, 02 Dec 2025 08:36:47 -0500</pubDate>
      <author>Miriam Vogel</author>
      <enclosure url="https://media.transistor.fm/648adb48/780c75b8.mp3" length="45695109" type="audio/mpeg"/>
      <itunes:author>Miriam Vogel</itunes:author>
      <itunes:duration>2855</itunes:duration>
      <itunes:summary>
        <![CDATA[<p>From November 11-12, 2025, EqualAI and Heartland Forward organized a convening with Mayor Tim Kelly of Chattanooga, Tennessee focused on AI in education and the workforce. The event brought together approximately 60 leaders from the City of Chattanooga and across the State of Tennessee to examine how AI is reshaping the education and jobs landscape and discuss the risks and opportunities of integrating AI into schools and workplaces. In Chattanooga, EqualAI President and CEO Miriam Vogel sat down with Mayor Kelly and Heartland Forward President Angie Cooper for a podcast recording of <a href="https://equalai.transistor.fm/"><em>In AI We Trust?</em></a>. During the discussion, Mayor Kelly discussed real-world examples of how the City of Chattanooga is using AI in practical ways to help improve everyday quality of life for city residents, and Angie shared how her organization, a “think-and-do-tank,” has launched the first-ever <a href="https://heartlandforward.org/news/heartland-forward-launches-first-ever-heartland-ai-caucus/">Heartland AI Caucus</a>. Learn about all this and much more in this latest episode!</p>]]>
      </itunes:summary>
      <itunes:keywords>technology, AI Governance, AI, Tech, Artificial Intelligence, AI literacy, AI Policy</itunes:keywords>
      <itunes:explicit>No</itunes:explicit>
    </item>
    <item>
      <title>Axios Chief Technology Correspondent Ina Fried on AI and This Moment in Technology</title>
      <itunes:episode>116</itunes:episode>
      <podcast:episode>116</podcast:episode>
      <itunes:title>Axios Chief Technology Correspondent Ina Fried on AI and This Moment in Technology</itunes:title>
      <itunes:episodeType>full</itunes:episodeType>
      <guid isPermaLink="false">b694f906-692c-4eb4-ae21-10dfcf1ecb61</guid>
      <link>https://share.transistor.fm/s/9bf37bfa</link>
      <description>
        <![CDATA[<p>On this episode of <em>In AI We Trust?</em>, EqualAI President and CEO Miriam Vogel speaks with <em>Axios </em>Chief Technology Correspondent Ina Fried about how AI and this moment in technology compares to other earlier moments of transformation and Ina’s role as a journalist writing the <a href="https://www.axios.com/signup/ai-plus"><em>Axios </em>AI+ newsletter</a> and putting together <em>Axios’s</em> AI+ Summits. In the episode, Ina discusses how AI is impacting jobs and her own work as a journalist, the complexity involved in building AI into human processes, and her hopes for the future of AI. You will not want to miss Ina’s sharp insights in this latest episode. Tune in now!</p><p><br></p>]]>
      </description>
      <content:encoded>
        <![CDATA[<p>On this episode of <em>In AI We Trust?</em>, EqualAI President and CEO Miriam Vogel speaks with <em>Axios </em>Chief Technology Correspondent Ina Fried about how AI and this moment in technology compares to other earlier moments of transformation and Ina’s role as a journalist writing the <a href="https://www.axios.com/signup/ai-plus"><em>Axios </em>AI+ newsletter</a> and putting together <em>Axios’s</em> AI+ Summits. In the episode, Ina discusses how AI is impacting jobs and her own work as a journalist, the complexity involved in building AI into human processes, and her hopes for the future of AI. You will not want to miss Ina’s sharp insights in this latest episode. Tune in now!</p><p><br></p>]]>
      </content:encoded>
      <pubDate>Mon, 17 Nov 2025 12:51:42 -0500</pubDate>
      <author>Miriam Vogel</author>
      <enclosure url="https://media.transistor.fm/9bf37bfa/24707447.mp3" length="62413216" type="audio/mpeg"/>
      <itunes:author>Miriam Vogel</itunes:author>
      <itunes:duration>2600</itunes:duration>
      <itunes:summary>
        <![CDATA[<p>On this episode of <em>In AI We Trust?</em>, EqualAI President and CEO Miriam Vogel speaks with <em>Axios </em>Chief Technology Correspondent Ina Fried about how AI and this moment in technology compares to other earlier moments of transformation and Ina’s role as a journalist writing the <a href="https://www.axios.com/signup/ai-plus"><em>Axios </em>AI+ newsletter</a> and putting together <em>Axios’s</em> AI+ Summits. In the episode, Ina discusses how AI is impacting jobs and her own work as a journalist, the complexity involved in building AI into human processes, and her hopes for the future of AI. You will not want to miss Ina’s sharp insights in this latest episode. Tune in now!</p><p><br></p>]]>
      </itunes:summary>
      <itunes:keywords>technology, AI Governance, AI, Tech, Artificial Intelligence, AI literacy, AI Policy</itunes:keywords>
      <itunes:explicit>No</itunes:explicit>
    </item>
    <item>
      <title>Governing the Machine Book Launch – Miriam Vogel, Paul Dongha, and Ray Eitel-Porter</title>
      <itunes:episode>115</itunes:episode>
      <podcast:episode>115</podcast:episode>
      <itunes:title>Governing the Machine Book Launch – Miriam Vogel, Paul Dongha, and Ray Eitel-Porter</itunes:title>
      <itunes:episodeType>full</itunes:episodeType>
      <guid isPermaLink="false">270fbd90-2646-4cfa-8bd5-5547848ad063</guid>
      <link>https://share.transistor.fm/s/2d5281ee</link>
      <description>
        <![CDATA[<p>HOT OFF THE PRESSES: In this special episode of <em>In AI We Trust?</em>, EqualAI President and CEO Miriam Vogel is joined by her two co-authors of <a href="https://www.amazon.com/Governing-Machine-navigate-unlock-potential/dp/139942629X/ref=sr_1_15?dib=eyJ2IjoiMSJ9.DXFerTuSKMYRzXbM0KVQYo6_o5JifqGPy18Y9liLx-xZRaMHLpHiCgF-bjCD1kmHSIdbfGiMitSPrlCjSVl42s-16vvRmyPivihgqJY7BjgOS3RK4UQ61V1cLhnpFYTUiAZa96ChYlLUI9lPrKxo2W5puWIPVqOU72-sHrZQau0Gxd_Yr7jc82Yn1-lsySkjJ8UH1WB3-JpxDxbb0P8djjy-FOS5OPXvYeksn07Dbo0.ZUNtH6Pjyw3aO1jJPjQySySvHpKrO0ywtZYU2cw9hPI&amp;dib_tag=se&amp;qid=1761080160&amp;refinements=p_n_publication_date%3A1250228011&amp;s=books&amp;sr=1-15"><em>Governing the Machine: How to navigate the risks of AI and unlock its true potential</em></a>, Dr. Paul Dongha, Head of Responsible AI and AI Strategy at NatWest Group, and Ray Eitel-Porter, Accenture Luminary and Senior Research Associate at the Intellectual Forum, Jesus College, Cambridge, to launch their new book released TODAY (October 28, 2025). Miriam, Paul, and Ray share their motivation for writing the book, some of the big takeaways on AI governance, why it is for companies and consumers alike, and what they hope readers will learn from their book. We hope that you enjoy this episode, and please be sure to purchase a copy of <em>Governing the Machine</em> at the link above! And share your feedback at <a href="mailto:contact@equalai.org">contact@equalai.org</a>! </p><p><br></p>]]>
      </description>
      <content:encoded>
        <![CDATA[<p>HOT OFF THE PRESSES: In this special episode of <em>In AI We Trust?</em>, EqualAI President and CEO Miriam Vogel is joined by her two co-authors of <a href="https://www.amazon.com/Governing-Machine-navigate-unlock-potential/dp/139942629X/ref=sr_1_15?dib=eyJ2IjoiMSJ9.DXFerTuSKMYRzXbM0KVQYo6_o5JifqGPy18Y9liLx-xZRaMHLpHiCgF-bjCD1kmHSIdbfGiMitSPrlCjSVl42s-16vvRmyPivihgqJY7BjgOS3RK4UQ61V1cLhnpFYTUiAZa96ChYlLUI9lPrKxo2W5puWIPVqOU72-sHrZQau0Gxd_Yr7jc82Yn1-lsySkjJ8UH1WB3-JpxDxbb0P8djjy-FOS5OPXvYeksn07Dbo0.ZUNtH6Pjyw3aO1jJPjQySySvHpKrO0ywtZYU2cw9hPI&amp;dib_tag=se&amp;qid=1761080160&amp;refinements=p_n_publication_date%3A1250228011&amp;s=books&amp;sr=1-15"><em>Governing the Machine: How to navigate the risks of AI and unlock its true potential</em></a>, Dr. Paul Dongha, Head of Responsible AI and AI Strategy at NatWest Group, and Ray Eitel-Porter, Accenture Luminary and Senior Research Associate at the Intellectual Forum, Jesus College, Cambridge, to launch their new book released TODAY (October 28, 2025). Miriam, Paul, and Ray share their motivation for writing the book, some of the big takeaways on AI governance, why it is for companies and consumers alike, and what they hope readers will learn from their book. We hope that you enjoy this episode, and please be sure to purchase a copy of <em>Governing the Machine</em> at the link above! And share your feedback at <a href="mailto:contact@equalai.org">contact@equalai.org</a>! </p><p><br></p>]]>
      </content:encoded>
      <pubDate>Tue, 28 Oct 2025 08:50:40 -0400</pubDate>
      <author>Miriam Vogel</author>
      <enclosure url="https://media.transistor.fm/2d5281ee/00e3172f.mp3" length="58857068" type="audio/mpeg"/>
      <itunes:author>Miriam Vogel</itunes:author>
      <itunes:duration>2450</itunes:duration>
      <itunes:summary>
        <![CDATA[<p>HOT OFF THE PRESSES: In this special episode of <em>In AI We Trust?</em>, EqualAI President and CEO Miriam Vogel is joined by her two co-authors of <a href="https://www.amazon.com/Governing-Machine-navigate-unlock-potential/dp/139942629X/ref=sr_1_15?dib=eyJ2IjoiMSJ9.DXFerTuSKMYRzXbM0KVQYo6_o5JifqGPy18Y9liLx-xZRaMHLpHiCgF-bjCD1kmHSIdbfGiMitSPrlCjSVl42s-16vvRmyPivihgqJY7BjgOS3RK4UQ61V1cLhnpFYTUiAZa96ChYlLUI9lPrKxo2W5puWIPVqOU72-sHrZQau0Gxd_Yr7jc82Yn1-lsySkjJ8UH1WB3-JpxDxbb0P8djjy-FOS5OPXvYeksn07Dbo0.ZUNtH6Pjyw3aO1jJPjQySySvHpKrO0ywtZYU2cw9hPI&amp;dib_tag=se&amp;qid=1761080160&amp;refinements=p_n_publication_date%3A1250228011&amp;s=books&amp;sr=1-15"><em>Governing the Machine: How to navigate the risks of AI and unlock its true potential</em></a>, Dr. Paul Dongha, Head of Responsible AI and AI Strategy at NatWest Group, and Ray Eitel-Porter, Accenture Luminary and Senior Research Associate at the Intellectual Forum, Jesus College, Cambridge, to launch their new book released TODAY (October 28, 2025). Miriam, Paul, and Ray share their motivation for writing the book, some of the big takeaways on AI governance, why it is for companies and consumers alike, and what they hope readers will learn from their book. We hope that you enjoy this episode, and please be sure to purchase a copy of <em>Governing the Machine</em> at the link above! And share your feedback at <a href="mailto:contact@equalai.org">contact@equalai.org</a>! </p><p><br></p>]]>
      </itunes:summary>
      <itunes:keywords>technology, AI Governance, AI, Tech, Artificial Intelligence, AI literacy, AI Policy</itunes:keywords>
      <itunes:explicit>No</itunes:explicit>
    </item>
    <item>
      <title>Nicholas Thompson, CEO of The Atlantic, on Leading The Atlantic through the “AI Hurricane Coming Through”</title>
      <itunes:episode>114</itunes:episode>
      <podcast:episode>114</podcast:episode>
      <itunes:title>Nicholas Thompson, CEO of The Atlantic, on Leading The Atlantic through the “AI Hurricane Coming Through”</itunes:title>
      <itunes:episodeType>full</itunes:episodeType>
      <guid isPermaLink="false">47c4c05e-1a71-4072-ac7c-e25513926869</guid>
      <link>https://share.transistor.fm/s/1d42e5b2</link>
      <description>
        <![CDATA[<p>In this episode of <em>In AI We Trust?</em>, EqualAI President and CEO Miriam Vogel sits down with Nicholas Thompson, CEO of <em>The Atlantic</em>, for a fascinating discussion about the “AI hurricane coming through” and how he sees the role of journalists when it comes to reporting on new technological developments and promoting AI literacy. During the episode, Nicholas shares his wish list of AI policies and regulations, the most interesting thing in tech he has seen recently, his thoughts on the most important things people should be paying attention to as they work to understand new developments in the AI space, and much, much more.</p>]]>
      </description>
      <content:encoded>
        <![CDATA[<p>In this episode of <em>In AI We Trust?</em>, EqualAI President and CEO Miriam Vogel sits down with Nicholas Thompson, CEO of <em>The Atlantic</em>, for a fascinating discussion about the “AI hurricane coming through” and how he sees the role of journalists when it comes to reporting on new technological developments and promoting AI literacy. During the episode, Nicholas shares his wish list of AI policies and regulations, the most interesting thing in tech he has seen recently, his thoughts on the most important things people should be paying attention to as they work to understand new developments in the AI space, and much, much more.</p>]]>
      </content:encoded>
      <pubDate>Mon, 20 Oct 2025 17:36:51 -0400</pubDate>
      <author>Miriam Vogel</author>
      <enclosure url="https://media.transistor.fm/1d42e5b2/b38c4456.mp3" length="58423942" type="audio/mpeg"/>
      <itunes:author>Miriam Vogel</itunes:author>
      <itunes:duration>2431</itunes:duration>
      <itunes:summary>
        <![CDATA[<p>In this episode of <em>In AI We Trust?</em>, EqualAI President and CEO Miriam Vogel sits down with Nicholas Thompson, CEO of <em>The Atlantic</em>, for a fascinating discussion about the “AI hurricane coming through” and how he sees the role of journalists when it comes to reporting on new technological developments and promoting AI literacy. During the episode, Nicholas shares his wish list of AI policies and regulations, the most interesting thing in tech he has seen recently, his thoughts on the most important things people should be paying attention to as they work to understand new developments in the AI space, and much, much more.</p>]]>
      </itunes:summary>
      <itunes:keywords>technology, AI Governance, AI, Tech, Artificial Intelligence, AI literacy, AI Policy</itunes:keywords>
      <itunes:explicit>No</itunes:explicit>
    </item>
    <item>
      <title>AI Literacy Series Ep. 13: Steven Overly (POLITICO) and Miriam Vogel (EqualAI): AI and the Changing Tech Landscape - A Reporter’s View</title>
      <itunes:episode>113</itunes:episode>
      <podcast:episode>113</podcast:episode>
      <itunes:title>AI Literacy Series Ep. 13: Steven Overly (POLITICO) and Miriam Vogel (EqualAI): AI and the Changing Tech Landscape - A Reporter’s View</itunes:title>
      <itunes:episodeType>full</itunes:episodeType>
      <guid isPermaLink="false">9665cac5-95be-471e-bc10-dbf5b0e2322f</guid>
      <link>https://share.transistor.fm/s/98142a5c</link>
      <description>
        <![CDATA[<p>In this episode of <em>In AI We Trust?</em>, EqualAI President and CEO Miriam Vogel interviews Steven Overly, host of POLITICO Tech’s weekly podcast and editorial director of POLITICO Live. Steven shares highlights from POLITICO’s 2025 AI &amp; Tech Summit and his thoughts on how the tech landscape has changed since he first started reporting on it as a journalist. Steven and Miriam also touch on topics including AI in government, AI and the workforce, and how to bring nuance into conversations around AI and ensure that more people are included in conversations about how it will impact their lives.</p>]]>
      </description>
      <content:encoded>
        <![CDATA[<p>In this episode of <em>In AI We Trust?</em>, EqualAI President and CEO Miriam Vogel interviews Steven Overly, host of POLITICO Tech’s weekly podcast and editorial director of POLITICO Live. Steven shares highlights from POLITICO’s 2025 AI &amp; Tech Summit and his thoughts on how the tech landscape has changed since he first started reporting on it as a journalist. Steven and Miriam also touch on topics including AI in government, AI and the workforce, and how to bring nuance into conversations around AI and ensure that more people are included in conversations about how it will impact their lives.</p>]]>
      </content:encoded>
      <pubDate>Wed, 01 Oct 2025 09:17:31 -0400</pubDate>
      <author>Miriam Vogel</author>
      <enclosure url="https://media.transistor.fm/98142a5c/d336a5d7.mp3" length="59017476" type="audio/mpeg"/>
      <itunes:author>Miriam Vogel</itunes:author>
      <itunes:duration>2456</itunes:duration>
      <itunes:summary>
        <![CDATA[<p>In this episode of <em>In AI We Trust?</em>, EqualAI President and CEO Miriam Vogel interviews Steven Overly, host of POLITICO Tech’s weekly podcast and editorial director of POLITICO Live. Steven shares highlights from POLITICO’s 2025 AI &amp; Tech Summit and his thoughts on how the tech landscape has changed since he first started reporting on it as a journalist. Steven and Miriam also touch on topics including AI in government, AI and the workforce, and how to bring nuance into conversations around AI and ensure that more people are included in conversations about how it will impact their lives.</p>]]>
      </itunes:summary>
      <itunes:keywords>technology, AI Governance, AI, Tech, Artificial Intelligence, AI literacy, AI Policy</itunes:keywords>
      <itunes:explicit>No</itunes:explicit>
    </item>
    <item>
      <title>AI Literacy Series Ep. 12: the World Economic Forum’s Daniel Dobrygowski and Karla Yee Amezaga</title>
      <itunes:episode>112</itunes:episode>
      <podcast:episode>112</podcast:episode>
      <itunes:title>AI Literacy Series Ep. 12: the World Economic Forum’s Daniel Dobrygowski and Karla Yee Amezaga</itunes:title>
      <itunes:episodeType>full</itunes:episodeType>
      <guid isPermaLink="false">bb9a9c87-bd8d-4eb7-b3df-cee2deca1cd8</guid>
      <link>https://share.transistor.fm/s/45b36f11</link>
      <description>
        <![CDATA[<p>In this episode of <em>In AI We Trust?</em>, cohosts Miriam Vogel and Nuala O’Connor speak with Daniel Dobrygowski, Head of Governance and Trust at the World Economic Forum (WEF), and Karla Yee Amezaga, Initiatives Lead for AI and Data Governance with WEF’s Centre for AI Excellence, about the importance of building trust in technology, strengthening AI and digital literacy, and modernizing boards to be fit for purpose for the modern era. They discuss EqualAI’s and WEF’s new playbooks, including WEF’s Playbook on Advancing Responsible AI Innovation and EqualAI’s new AI Governance Playbook. Find the WEF Playbook <a href="https://www.weforum.org/publications/advancing-responsible-ai-innovation-a-playbook/">here</a> and EqualAI’s AI Governance Playbook <a href="https://www.equalai.org/">here</a>.</p><p><br></p>]]>
      </description>
      <content:encoded>
        <![CDATA[<p>In this episode of <em>In AI We Trust?</em>, cohosts Miriam Vogel and Nuala O’Connor speak with Daniel Dobrygowski, Head of Governance and Trust at the World Economic Forum (WEF), and Karla Yee Amezaga, Initiatives Lead for AI and Data Governance with WEF’s Centre for AI Excellence, about the importance of building trust in technology, strengthening AI and digital literacy, and modernizing boards to be fit for purpose for the modern era. They discuss EqualAI’s and WEF’s new playbooks, including WEF’s Playbook on Advancing Responsible AI Innovation and EqualAI’s new AI Governance Playbook. Find the WEF Playbook <a href="https://www.weforum.org/publications/advancing-responsible-ai-innovation-a-playbook/">here</a> and EqualAI’s AI Governance Playbook <a href="https://www.equalai.org/">here</a>.</p><p><br></p>]]>
      </content:encoded>
      <pubDate>Mon, 22 Sep 2025 11:38:07 -0400</pubDate>
      <author>Miriam Vogel</author>
      <enclosure url="https://media.transistor.fm/45b36f11/48ba0bc7.mp3" length="67025934" type="audio/mpeg"/>
      <itunes:author>Miriam Vogel</itunes:author>
      <itunes:duration>2792</itunes:duration>
      <itunes:summary>
        <![CDATA[<p>In this episode of <em>In AI We Trust?</em>, cohosts Miriam Vogel and Nuala O’Connor speak with Daniel Dobrygowski, Head of Governance and Trust at the World Economic Forum (WEF), and Karla Yee Amezaga, Initiatives Lead for AI and Data Governance with WEF’s Centre for AI Excellence, about the importance of building trust in technology, strengthening AI and digital literacy, and modernizing boards to be fit for purpose for the modern era. They discuss EqualAI’s and WEF’s new playbooks, including WEF’s Playbook on Advancing Responsible AI Innovation and EqualAI’s new AI Governance Playbook. Find the WEF Playbook <a href="https://www.weforum.org/publications/advancing-responsible-ai-innovation-a-playbook/">here</a> and EqualAI’s AI Governance Playbook <a href="https://www.equalai.org/">here</a>.</p><p><br></p>]]>
      </itunes:summary>
      <itunes:keywords>technology, AI Governance, AI, Tech, Artificial Intelligence, AI literacy, AI Policy</itunes:keywords>
      <itunes:explicit>No</itunes:explicit>
    </item>
    <item>
      <title>AI Literacy Series Ep. 11: Adam Thierer on AI, Innovation &amp; Tech Policy</title>
      <itunes:episode>111</itunes:episode>
      <podcast:episode>111</podcast:episode>
      <itunes:title>AI Literacy Series Ep. 11: Adam Thierer on AI, Innovation &amp; Tech Policy</itunes:title>
      <itunes:episodeType>full</itunes:episodeType>
      <guid isPermaLink="false">2b9073c8-50ac-4afd-a063-0fc29df5df62</guid>
      <link>https://share.transistor.fm/s/28e06cd3</link>
      <description>
        <![CDATA[<p>In this episode of <em>In AI We Trust?</em>, cohosts Miriam Vogel and Nuala O’Connor are joined by Adam Thierer, resident senior fellow on R Street's Tech &amp; Innovation team. Adam weighs in on the Trump Administration’s <a href="https://www.whitehouse.gov/wp-content/uploads/2025/07/Americas-AI-Action-Plan.pdf">AI Action Plan</a>, the importance of Congress in developing AI policy, and existing legal principles and practices that help define the new digital and AI age. They focus on the mandate for AI literacy, as well as the necessity of AI technologies being regulated in a transparent and trustworthy way that end users, and particularly consumers, can understand.</p><p><br></p>]]>
      </description>
      <content:encoded>
        <![CDATA[<p>In this episode of <em>In AI We Trust?</em>, cohosts Miriam Vogel and Nuala O’Connor are joined by Adam Thierer, resident senior fellow on R Street's Tech &amp; Innovation team. Adam weighs in on the Trump Administration’s <a href="https://www.whitehouse.gov/wp-content/uploads/2025/07/Americas-AI-Action-Plan.pdf">AI Action Plan</a>, the importance of Congress in developing AI policy, and existing legal principles and practices that help define the new digital and AI age. They focus on the mandate for AI literacy, as well as the necessity of AI technologies being regulated in a transparent and trustworthy way that end users, and particularly consumers, can understand.</p><p><br></p>]]>
      </content:encoded>
      <pubDate>Tue, 05 Aug 2025 09:41:31 -0400</pubDate>
      <author>Miriam Vogel</author>
      <enclosure url="https://media.transistor.fm/28e06cd3/793fd3b2.mp3" length="84507820" type="audio/mpeg"/>
      <itunes:author>Miriam Vogel</itunes:author>
      <itunes:duration>3520</itunes:duration>
      <itunes:summary>
        <![CDATA[<p>In this episode of <em>In AI We Trust?</em>, cohosts Miriam Vogel and Nuala O’Connor are joined by Adam Thierer, resident senior fellow on R Street's Tech &amp; Innovation team. Adam weighs in on the Trump Administration’s <a href="https://www.whitehouse.gov/wp-content/uploads/2025/07/Americas-AI-Action-Plan.pdf">AI Action Plan</a>, the importance of Congress in developing AI policy, and existing legal principles and practices that help define the new digital and AI age. They focus on the mandate for AI literacy, as well as the necessity of AI technologies being regulated in a transparent and trustworthy way that end users, and particularly consumers, can understand.</p><p><br></p>]]>
      </itunes:summary>
      <itunes:keywords>technology, AI Governance, AI, Tech, Artificial Intelligence, AI literacy, AI Policy</itunes:keywords>
      <itunes:explicit>No</itunes:explicit>
    </item>
    <item>
      <title>AI Literacy Series Ep. 10: Angie Cooper’s Call to Action for the Heartland</title>
      <itunes:episode>110</itunes:episode>
      <podcast:episode>110</podcast:episode>
      <itunes:title>AI Literacy Series Ep. 10: Angie Cooper’s Call to Action for the Heartland</itunes:title>
      <itunes:episodeType>full</itunes:episodeType>
      <guid isPermaLink="false">8515ce9d-23a9-495f-bcaa-ef8009f538d5</guid>
      <link>https://share.transistor.fm/s/59790e6f</link>
      <description>
        <![CDATA[<p>In Episode 10 of the In AI We Trust? AI Literacy series, Angie Cooper’s Call to Action for the Heartland, Miriam Vogel talks with Angie Cooper, President and Chief Operating Officer of Heartland Forward, to explore how artificial intelligence (AI) can accelerate economic growth across America’s Heartland. The discussion follows Heartland Forward’s recent annual Heartland Summit, the data-driven insights that inform their mission, and their partnership with Stemuli to create a first-of-its-kind AI literacy video game to promote AI learning for rural students. Angie stresses the importance of increasing access to AI by expanding affordable, high-speed internet and building trust with AI platforms through education initiatives and open conversations with teachers and employers. This episode explores how AI can be utilized as a tool to benefit small businesses, prepare students for the workforce, and advance jobs throughout the Heartland and beyond. </p>]]>
      </description>
      <content:encoded>
        <![CDATA[<p>In Episode 10 of the In AI We Trust? AI Literacy series, Angie Cooper’s Call to Action for the Heartland, Miriam Vogel talks with Angie Cooper, President and Chief Operating Officer of Heartland Forward, to explore how artificial intelligence (AI) can accelerate economic growth across America’s Heartland. The discussion follows Heartland Forward’s recent annual Heartland Summit, the data-driven insights that inform their mission, and their partnership with Stemuli to create a first-of-its-kind AI literacy video game to promote AI learning for rural students. Angie stresses the importance of increasing access to AI by expanding affordable, high-speed internet and building trust with AI platforms through education initiatives and open conversations with teachers and employers. This episode explores how AI can be utilized as a tool to benefit small businesses, prepare students for the workforce, and advance jobs throughout the Heartland and beyond. </p>]]>
      </content:encoded>
      <pubDate>Tue, 24 Jun 2025 14:15:14 -0400</pubDate>
      <author>Miriam Vogel</author>
      <enclosure url="https://media.transistor.fm/59790e6f/adb3b2fc.mp3" length="53780009" type="audio/mpeg"/>
      <itunes:author>Miriam Vogel</itunes:author>
      <itunes:duration>2240</itunes:duration>
      <itunes:summary>
        <![CDATA[<p>In Episode 10 of the In AI We Trust? AI Literacy series, Angie Cooper’s Call to Action for the Heartland, Miriam Vogel talks with Angie Cooper, President and Chief Operating Officer of Heartland Forward, to explore how artificial intelligence (AI) can accelerate economic growth across America’s Heartland. The discussion follows Heartland Forward’s recent annual Heartland Summit, the data-driven insights that inform their mission, and their partnership with Stemuli to create a first-of-its-kind AI literacy video game to promote AI learning for rural students. Angie stresses the importance of increasing access to AI by expanding affordable, high-speed internet and building trust with AI platforms through education initiatives and open conversations with teachers and employers. This episode explores how AI can be utilized as a tool to benefit small businesses, prepare students for the workforce, and advance jobs throughout the Heartland and beyond. </p>]]>
      </itunes:summary>
      <itunes:keywords>technology, AI Governance, AI, Tech, Artificial Intelligence, AI literacy, AI Policy</itunes:keywords>
      <itunes:explicit>No</itunes:explicit>
    </item>
    <item>
      <title>AI Literacy Series Ep. 9: Robbie Torney of Common Sense Media &amp; Special Co-host Nuala O’Connor</title>
      <itunes:episode>109</itunes:episode>
      <podcast:episode>109</podcast:episode>
      <itunes:title>AI Literacy Series Ep. 9: Robbie Torney of Common Sense Media &amp; Special Co-host Nuala O’Connor</itunes:title>
      <itunes:episodeType>full</itunes:episodeType>
      <guid isPermaLink="false">da6cfc21-3ac1-444e-928f-74abb11f616d</guid>
      <link>https://share.transistor.fm/s/18b5aad0</link>
      <description>
        <![CDATA[<p>In this episode of<em> In AI We Trust?</em>, Miriam &amp; Nuala speak with Common Sense Senior Director of AI Programs Robbie Torney to discuss AI's impact on children, families, and schools, focusing on AI literacy, which builds upon media and digital literacy. Robbie advises parents to engage in tech conversations with curiosity and empathy and encourages educators to view AI as a tool to enhance learning, noting students' prevalent use. Common Sense Media provides AI training and risk assessments for educators. Torney aims to bridge digital divides and supports AI implementation in underserved schools, highlighting risks of AI companions for vulnerable youth and developing resources for school AI readiness and risk assessments. The episode stresses the importance of AI literacy and critical thinking to navigate AI's complexities and minimize harm.</p><p><br></p><p>The EqualAI AI Literacy podcast series builds on <em>In AI We Trust?</em>’s global reach, focusing specifically on AI literacy. 
Featuring prominent leaders in the technology, education, and governance fields, this special series provides listeners with valuable insights and discussions around AI’s impact on society, leading efforts in this area of AI literacy, and how listeners can benefit from these experts and tools.</p><p><br></p><p>Related Resources</p><ul><li>Episode Blog Post</li><li><a href="https://www.commonsensemedia.org/ai-ratings/ai-risk-assessments">AI Risk Assessments</a></li><li><a href="https://www.commonsense.org/education/events/training-course-ai-basics-for-k-12-teachers">AI Basics for K–12 Teachers</a></li><li><a href="https://www.commonsensemedia.org/articles/parents-ultimate-guide-to-ai-companions-and-relationships">Parents’ Ultimate Guide to AI Companions and Relationships</a></li><li><a href="https://www.commonsensemedia.org/sites/default/files/research/report/2025-common-sense-census-web-2.pdf">2025: The Common Sense Census</a></li><li><a href="https://www.commonsensemedia.org/sites/default/files/research/report/2024-the-dawn-of-the-ai-era_final-release-for-web.pdf">2024: The Dawn of the AI Era</a></li></ul>]]>
      </description>
      <content:encoded>
        <![CDATA[<p>In this episode of<em> In AI We Trust?</em>, Miriam &amp; Nuala speak with Common Sense Senior Director of AI Programs Robbie Torney to discuss AI's impact on children, families, and schools, focusing on AI literacy, which builds upon media and digital literacy. Robbie advises parents to engage in tech conversations with curiosity and empathy and encourages educators to view AI as a tool to enhance learning, noting students' prevalent use. Common Sense Media provides AI training and risk assessments for educators. Torney aims to bridge digital divides and supports AI implementation in underserved schools, highlighting risks of AI companions for vulnerable youth and developing resources for school AI readiness and risk assessments. The episode stresses the importance of AI literacy and critical thinking to navigate AI's complexities and minimize harm.</p><p><br></p><p>The EqualAI AI Literacy podcast series builds on <em>In AI We Trust?</em>’s global reach, focusing specifically on AI literacy. 
Featuring prominent leaders in the technology, education, and governance fields, this special series provides listeners with valuable insights and discussions around AI’s impact on society, leading efforts in this area of AI literacy, and how listeners can benefit from these experts and tools.</p><p><br></p><p>Related Resources</p><ul><li>Episode Blog Post</li><li><a href="https://www.commonsensemedia.org/ai-ratings/ai-risk-assessments">AI Risk Assessments</a></li><li><a href="https://www.commonsense.org/education/events/training-course-ai-basics-for-k-12-teachers">AI Basics for K–12 Teachers</a></li><li><a href="https://www.commonsensemedia.org/articles/parents-ultimate-guide-to-ai-companions-and-relationships">Parents’ Ultimate Guide to AI Companions and Relationships</a></li><li><a href="https://www.commonsensemedia.org/sites/default/files/research/report/2025-common-sense-census-web-2.pdf">2025: The Common Sense Census</a></li><li><a href="https://www.commonsensemedia.org/sites/default/files/research/report/2024-the-dawn-of-the-ai-era_final-release-for-web.pdf">2024: The Dawn of the AI Era</a></li></ul>]]>
      </content:encoded>
      <pubDate>Tue, 10 Jun 2025 17:38:03 -0400</pubDate>
      <author>Miriam Vogel</author>
      <enclosure url="https://media.transistor.fm/18b5aad0/20d3001e.mp3" length="100553845" type="audio/mpeg"/>
      <itunes:author>Miriam Vogel</itunes:author>
      <itunes:duration>4189</itunes:duration>
      <itunes:summary>
        <![CDATA[<p>In this episode of<em> In AI We Trust?</em>, Miriam &amp; Nuala speak with Common Sense Senior Director of AI Programs Robbie Torney to discuss AI's impact on children, families, and schools, focusing on AI literacy, which builds upon media and digital literacy. Robbie advises parents to engage in tech conversations with curiosity and empathy and encourages educators to view AI as a tool to enhance learning, noting students' prevalent use. Common Sense Media provides AI training and risk assessments for educators. Torney aims to bridge digital divides and supports AI implementation in underserved schools, highlighting risks of AI companions for vulnerable youth and developing resources for school AI readiness and risk assessments. The episode stresses the importance of AI literacy and critical thinking to navigate AI's complexities and minimize harm.</p><p><br></p><p>The EqualAI AI Literacy podcast series builds on <em>In AI We Trust?</em>’s global reach, focusing specifically on AI literacy. 
Featuring prominent leaders in the technology, education, and governance fields, this special series provides listeners with valuable insights and discussions around AI’s impact on society, leading efforts in this area of AI literacy, and how listeners can benefit from these experts and tools.</p><p><br></p><p>Related Resources</p><ul><li>Episode Blog Post</li><li><a href="https://www.commonsensemedia.org/ai-ratings/ai-risk-assessments">AI Risk Assessments</a></li><li><a href="https://www.commonsense.org/education/events/training-course-ai-basics-for-k-12-teachers">AI Basics for K–12 Teachers</a></li><li><a href="https://www.commonsensemedia.org/articles/parents-ultimate-guide-to-ai-companions-and-relationships">Parents’ Ultimate Guide to AI Companions and Relationships</a></li><li><a href="https://www.commonsensemedia.org/sites/default/files/research/report/2025-common-sense-census-web-2.pdf">2025: The Common Sense Census</a></li><li><a href="https://www.commonsensemedia.org/sites/default/files/research/report/2024-the-dawn-of-the-ai-era_final-release-for-web.pdf">2024: The Dawn of the AI Era</a></li></ul>]]>
      </itunes:summary>
      <itunes:keywords>technology, AI Governance, AI, Tech, Artificial Intelligence, AI literacy, AI Policy</itunes:keywords>
      <itunes:explicit>No</itunes:explicit>
    </item>
    <item>
      <title>SPECIAL EDITION: A Pre-Summit Conversation With Van Jones</title>
      <itunes:episode>108</itunes:episode>
      <podcast:episode>108</podcast:episode>
      <itunes:title>SPECIAL EDITION: A Pre-Summit Conversation With Van Jones</itunes:title>
      <itunes:episodeType>full</itunes:episodeType>
      <guid isPermaLink="false">9df298fc-9864-438c-b58d-f7826739547e</guid>
      <link>https://share.transistor.fm/s/edf1e173</link>
      <description>
        <![CDATA[<p>In this special episode of <em>In AI We Trust?</em>, recorded live at the launch of the EqualAI C-Suite Summit in Washington, D.C., host Miriam Vogel sits down with the dynamic Van Jones — acclaimed social entrepreneur, innovator, and tech evangelist. Together, they dive into a thought-provoking conversation about how AI can be a transformative force for opportunity creation. With his trademark clarity and conviction, Van offers a hopeful vision for the future of AI — one that empowers communities and drives societal progress, but only if we lead with the right values and policies at this critical moment.</p><p><br></p><p>Related Resources</p><ul><li><a href="https://www.dreammachine.org/aixlibrary">Dream Machine AI x Library Project</a></li></ul>]]>
      </description>
      <content:encoded>
        <![CDATA[<p>In this special episode of <em>In AI We Trust?</em>, recorded live at the launch of the EqualAI C-Suite Summit in Washington, D.C., host Miriam Vogel sits down with the dynamic Van Jones — acclaimed social entrepreneur, innovator, and tech evangelist. Together, they dive into a thought-provoking conversation about how AI can be a transformative force for opportunity creation. With his trademark clarity and conviction, Van offers a hopeful vision for the future of AI — one that empowers communities and drives societal progress, but only if we lead with the right values and policies at this critical moment.</p><p><br></p><p>Related Resources</p><ul><li><a href="https://www.dreammachine.org/aixlibrary">Dream Machine AI x Library Project</a></li></ul>]]>
      </content:encoded>
      <pubDate>Wed, 28 May 2025 16:19:32 -0400</pubDate>
      <author>Miriam Vogel</author>
      <enclosure url="https://media.transistor.fm/edf1e173/218dcabf.mp3" length="23331032" type="audio/mpeg"/>
      <itunes:author>Miriam Vogel</itunes:author>
      <itunes:duration>971</itunes:duration>
      <itunes:summary>
        <![CDATA[<p>In this special episode of <em>In AI We Trust?</em>, recorded live at the launch of the EqualAI C-Suite Summit in Washington, D.C., host Miriam Vogel sits down with the dynamic Van Jones — acclaimed social entrepreneur, innovator, and tech evangelist. Together, they dive into a thought-provoking conversation about how AI can be a transformative force for opportunity creation. With his trademark clarity and conviction, Van offers a hopeful vision for the future of AI — one that empowers communities and drives societal progress, but only if we lead with the right values and policies at this critical moment.</p><p><br></p><p>Related Resources</p><ul><li><a href="https://www.dreammachine.org/aixlibrary">Dream Machine AI x Library Project</a></li></ul>]]>
      </itunes:summary>
      <itunes:keywords>technology, AI Governance, AI, Tech, Artificial Intelligence, AI literacy, AI Policy</itunes:keywords>
      <itunes:explicit>No</itunes:explicit>
    </item>
    <item>
      <title>AI Literacy Series Ep. 7: Open Source Under Threat: Andrew Ng Issues a Call to Action</title>
      <itunes:episode>107</itunes:episode>
      <podcast:episode>107</podcast:episode>
      <itunes:title>AI Literacy Series Ep. 7: Open Source Under Threat: Andrew Ng Issues a Call to Action</itunes:title>
      <itunes:episodeType>full</itunes:episodeType>
      <guid isPermaLink="false">82bce2e2-d4c5-4d1c-9a66-ca4d9cca6b9d</guid>
      <link>https://share.transistor.fm/s/3a6cf14f</link>
      <description>
        <![CDATA[<p>In this episode of <em>In AI We Trust?</em>, Dr. Andrew Ng joins co-hosts Miriam Vogel and Rosalind Wiseman to discuss AI literacy and the need for widespread AI understanding. Dr. Ng makes a call to action that everyone should learn to code, especially with AI-assisted coding becoming more accessible. The episode also addresses AI fears and misconceptions, highlighting the importance of learning about AI for increased productivity and potential career growth. The conversation further explores AI's potential for positive large-scale social impact, such as in climate modeling, and the challenges of conveying this potential amidst widespread fears across the general public. The discussion addresses AI's potential for social good, the urgent need for AI education and upskilling, and the complexities of AI integration in education. This important episode underscores Andrew, Miriam &amp; Rosalind’s belief in the transformative potential of AI when individuals are empowered to manage, adapt and build with it, fostering innovation across various sectors and in users’ daily lives.</p><p>The EqualAI AI Literacy podcast series builds on <em>In AI We Trust?</em>’s global reach, focusing specifically on AI literacy. Featuring prominent leaders in the technology, education, and governance fields, this special series will provide listeners with valuable insights and discussions around AI’s impact on society, leading efforts in this area of AI literacy, and how listeners can benefit from these experts and tools.</p><p>Related Resources</p><ul><li><a href="https://www.equalai.org/blog/ai-literacy-series-dr-andrew-ng-on-scaling-ai-for-social-good/">Episode Blog Post</a></li><li><a href="https://equalai.transistor.fm/episodes/andrew-ng-should-we-fear-an-ai-driven-existential-crisis">Andrew Ng: Should we fear an AI-driven existential crisis?</a> (<em>In AI We Trust?</em>, Jan. 
2024)</li><li><a href="https://www.deeplearning.ai/">DeepLearning.AI</a></li><li><a href="https://www.coursera.org/">Coursera</a></li></ul>]]>
      </description>
      <content:encoded>
        <![CDATA[<p>In this episode of <em>In AI We Trust?</em>, Dr. Andrew Ng joins co-hosts Miriam Vogel and Rosalind Wiseman to discuss AI literacy and the need for widespread AI understanding. Dr. Ng makes a call to action that everyone should learn to code, especially with AI-assisted coding becoming more accessible. The episode also addresses AI fears and misconceptions, highlighting the importance of learning about AI for increased productivity and potential career growth. The conversation further explores AI's potential for positive large-scale social impact, such as in climate modeling, and the challenges of conveying this potential amidst widespread fears across the general public. The discussion addresses AI's potential for social good, the urgent need for AI education and upskilling, and the complexities of AI integration in education. This important episode underscores Andrew, Miriam &amp; Rosalind’s belief in the transformative potential of AI when individuals are empowered to manage, adapt and build with it, fostering innovation across various sectors and in users’ daily lives.</p><p>The EqualAI AI Literacy podcast series builds on <em>In AI We Trust?</em>’s global reach, focusing specifically on AI literacy. Featuring prominent leaders in the technology, education, and governance fields, this special series will provide listeners with valuable insights and discussions around AI’s impact on society, leading efforts in this area of AI literacy, and how listeners can benefit from these experts and tools.</p><p>Related Resources</p><ul><li><a href="https://www.equalai.org/blog/ai-literacy-series-dr-andrew-ng-on-scaling-ai-for-social-good/">Episode Blog Post</a></li><li><a href="https://equalai.transistor.fm/episodes/andrew-ng-should-we-fear-an-ai-driven-existential-crisis">Andrew Ng: Should we fear an AI-driven existential crisis?</a> (<em>In AI We Trust?</em>, Jan. 
2024)</li><li><a href="https://www.deeplearning.ai/">DeepLearning.AI</a></li><li><a href="https://www.coursera.org/">Coursera</a></li></ul>]]>
      </content:encoded>
      <pubDate>Wed, 23 Apr 2025 08:43:24 -0400</pubDate>
      <author>Miriam Vogel</author>
      <enclosure url="https://media.transistor.fm/3a6cf14f/2e799ec4.mp3" length="76604842" type="audio/mpeg"/>
      <itunes:author>Miriam Vogel</itunes:author>
      <itunes:duration>3190</itunes:duration>
      <itunes:summary>
        <![CDATA[<p>In this episode of <em>In AI We Trust?</em>, Dr. Andrew Ng joins co-hosts Miriam Vogel and Rosalind Wiseman to discuss AI literacy and the need for widespread AI understanding. Dr. Ng makes a call to action that everyone should learn to code, especially with AI-assisted coding becoming more accessible. The episode also addresses AI fears and misconceptions, highlighting the importance of learning about AI for increased productivity and potential career growth. The conversation further explores AI's potential for positive large-scale social impact, such as in climate modeling, and the challenges of conveying this potential amidst widespread fears across the general public. The discussion addresses AI's potential for social good, the urgent need for AI education and upskilling, and the complexities of AI integration in education. This important episode underscores Andrew, Miriam &amp; Rosalind’s belief in the transformative potential of AI when individuals are empowered to manage, adapt and build with it, fostering innovation across various sectors and in users’ daily lives.</p><p>The EqualAI AI Literacy podcast series builds on <em>In AI We Trust?</em>’s global reach, focusing specifically on AI literacy. Featuring prominent leaders in the technology, education, and governance fields, this special series will provide listeners with valuable insights and discussions around AI’s impact on society, leading efforts in this area of AI literacy, and how listeners can benefit from these experts and tools.</p><p>Related Resources</p><ul><li><a href="https://www.equalai.org/blog/ai-literacy-series-dr-andrew-ng-on-scaling-ai-for-social-good/">Episode Blog Post</a></li><li><a href="https://equalai.transistor.fm/episodes/andrew-ng-should-we-fear-an-ai-driven-existential-crisis">Andrew Ng: Should we fear an AI-driven existential crisis?</a> (<em>In AI We Trust?</em>, Jan. 
2024)</li><li><a href="https://www.deeplearning.ai/">DeepLearning.AI</a></li><li><a href="https://www.coursera.org/">Coursera</a></li></ul>]]>
      </itunes:summary>
      <itunes:keywords>technology, AI Governance, AI, Tech, Artificial Intelligence, AI literacy, AI Policy</itunes:keywords>
      <itunes:explicit>No</itunes:explicit>
    </item>
    <item>
      <title>AI Literacy Series Ep. 6: Bridging the Gap Between Technology and Communities with Susan Gonzales</title>
      <itunes:episode>106</itunes:episode>
      <podcast:episode>106</podcast:episode>
      <itunes:title>AI Literacy Series Ep. 6: Bridging the Gap Between Technology and Communities with Susan Gonzales</itunes:title>
      <itunes:episodeType>full</itunes:episodeType>
      <guid isPermaLink="false">f6fbb664-6b09-4362-97fb-ee4a25ea6c59</guid>
      <link>https://share.transistor.fm/s/0fa70de9</link>
      <description>
        <![CDATA[<p>This episode of <em>In AI We Trust? </em>features co-hosts Miriam Vogel and Rosalind Wiseman continuing their AI literacy series with Susan Gonzales, CEO of AIandYou. The discussion centers on the critical need for basic AI literacy within marginalized communities to create opportunities and prevent an "AI divide." Susan emphasizes overcoming fear, building foundational AI knowledge, and understanding AI's impact on jobs and small businesses. She stresses the urgency of AI education and AIandYou's role in providing accessible resources. The episode highlights the importance of dialogue and strategic partnerships to advance AI literacy, ensuring that everyone can benefit from AI's opportunities before the "window" closes.</p><p>The EqualAI AI Literacy podcast series builds on <em>In AI We Trust?</em>’s global reach, focusing specifically on AI literacy. Featuring prominent leaders in the technology, education, and governance fields, this special series will provide listeners with valuable insights and discussions around AI’s impact on society, leading efforts in this area of AI literacy, and how listeners can benefit from these experts and tools.</p><p>Related Resources</p><ul><li><a href="https://www.equalai.org/blog/ai-literacy-series-bridging-the-gap-between-technology-and-communities-with-susan-gonzalez/">Episode Blog Post</a></li><li><a href="https://aiandyou.org/">AIandYou</a></li></ul>]]>
      </description>
      <content:encoded>
        <![CDATA[<p>This episode of <em>In AI We Trust? </em>features co-hosts Miriam Vogel and Rosalind Wiseman continuing their AI literacy series with Susan Gonzales, CEO of AIandYou. The discussion centers on the critical need for basic AI literacy within marginalized communities to create opportunities and prevent an "AI divide." Susan emphasizes overcoming fear, building foundational AI knowledge, and understanding AI's impact on jobs and small businesses. She stresses the urgency of AI education and AIandYou's role in providing accessible resources. The episode highlights the importance of dialogue and strategic partnerships to advance AI literacy, ensuring that everyone can benefit from AI's opportunities before the "window" closes.</p><p>The EqualAI AI Literacy podcast series builds on <em>In AI We Trust?</em>’s global reach, focusing specifically on AI literacy. Featuring prominent leaders in the technology, education, and governance fields, this special series will provide listeners with valuable insights and discussions around AI’s impact on society, leading efforts in this area of AI literacy, and how listeners can benefit from these experts and tools.</p><p>Related Resources</p><ul><li><a href="https://www.equalai.org/blog/ai-literacy-series-bridging-the-gap-between-technology-and-communities-with-susan-gonzalez/">Episode Blog Post</a></li><li><a href="https://aiandyou.org/">AIandYou</a></li></ul>]]>
      </content:encoded>
      <pubDate>Mon, 07 Apr 2025 17:09:26 -0400</pubDate>
      <author>Miriam Vogel</author>
      <enclosure url="https://media.transistor.fm/0fa70de9/ecc08e8a.mp3" length="73917461" type="audio/mpeg"/>
      <itunes:author>Miriam Vogel</itunes:author>
      <itunes:duration>3079</itunes:duration>
      <itunes:summary>
        <![CDATA[<p>This episode of <em>In AI We Trust? </em>features co-hosts Miriam Vogel and Rosalind Wiseman continuing their AI literacy series with Susan Gonzales, CEO of AIandYou. The discussion centers on the critical need for basic AI literacy within marginalized communities to create opportunities and prevent an "AI divide." Susan emphasizes overcoming fear, building foundational AI knowledge, and understanding AI's impact on jobs and small businesses. She stresses the urgency of AI education and AIandYou's role in providing accessible resources. The episode highlights the importance of dialogue and strategic partnerships to advance AI literacy, ensuring that everyone can benefit from AI's opportunities before the "window" closes.</p><p>The EqualAI AI Literacy podcast series builds on <em>In AI We Trust?</em>’s global reach, focusing specifically on AI literacy. Featuring prominent leaders in the technology, education, and governance fields, this special series will provide listeners with valuable insights and discussions around AI’s impact on society, leading efforts in this area of AI literacy, and how listeners can benefit from these experts and tools.</p><p>Related Resources</p><ul><li><a href="https://www.equalai.org/blog/ai-literacy-series-bridging-the-gap-between-technology-and-communities-with-susan-gonzalez/">Episode Blog Post</a></li><li><a href="https://aiandyou.org/">AIandYou</a></li></ul>]]>
      </itunes:summary>
      <itunes:keywords>technology, AI Governance, AI, Tech, Artificial Intelligence, AI literacy, AI Policy</itunes:keywords>
      <itunes:explicit>No</itunes:explicit>
    </item>
    <item>
      <title>AI Literacy Series Ep. 5 with Judy Spitz: Fixing the Tech Talent Pipeline</title>
      <itunes:episode>105</itunes:episode>
      <podcast:episode>105</podcast:episode>
      <itunes:title>AI Literacy Series Ep. 5 with Judy Spitz: Fixing the Tech Talent Pipeline</itunes:title>
      <itunes:episodeType>full</itunes:episodeType>
      <guid isPermaLink="false">612ce74c-8937-45e2-9786-210159bcc29b</guid>
      <link>https://share.transistor.fm/s/b3f15b55</link>
      <description>
        <![CDATA[<p>In this episode of <em>In AI We Trust?,</em> co-hosts Miriam Vogel and Rosalind Wiseman speak with Dr. Judith Spitz, Founder and Executive Director of Break Through Tech, who sheds light on the blind spots within the industry, and discusses how Break Through Tech is pioneering innovative programs to open doors to talented individuals. She gets more young people from a broad array of backgrounds to study technology disciplines, ensures they learn leadership and other skills critical to their success, and gets them into industry — she is single-handedly building a more robust and prepared tech ecosystem.</p><p><br>The EqualAI AI Literacy podcast series builds on <em>In AI We Trust?</em>’s global reach, focusing specifically on AI literacy. Featuring prominent leaders in the technology, education, and governance fields, this special series will provide listeners with valuable insights and discussions around AI’s impact on society, leading efforts in this area of AI literacy, and how listeners can benefit from these experts and tools.</p><p>Related Resources</p><ul><li><a href="https://www.equalai.org/blog/podcast/ai-literacy-series-breaking-barriers-building-a-more-robust-tech-talent-pipeline-with-break-through-tech-with-judith-spitz/">Episode Blog Post</a></li><li><a href="https://www.breakthroughtech.org/">Break Through Tech</a></li><li><a href="https://www.breakthroughtech.org/app/uploads/2022/11/BTT_report_sprinternship_FINAL_10.12.22-3.pdf">Achieving Greater Gender Equity in Tech</a></li></ul>]]>
      </description>
      <content:encoded>
        <![CDATA[<p>In this episode of <em>In AI We Trust?,</em> co-hosts Miriam Vogel and Rosalind Wiseman speak with Dr. Judith Spitz, Founder and Executive Director of Break Through Tech, who sheds light on the blind spots within the industry, and discusses how Break Through Tech is pioneering innovative programs to open doors to talented individuals. She gets more young people from a broad array of backgrounds to study technology disciplines, ensures they learn leadership and other skills critical to their success, and gets them into industry — she is single-handedly building a more robust and prepared tech ecosystem.</p><p><br>The EqualAI AI Literacy podcast series builds on <em>In AI We Trust?</em>’s global reach, focusing specifically on AI literacy. Featuring prominent leaders in the technology, education, and governance fields, this special series will provide listeners with valuable insights and discussions around AI’s impact on society, leading efforts in this area of AI literacy, and how listeners can benefit from these experts and tools.</p><p>Related Resources</p><ul><li><a href="https://www.equalai.org/blog/podcast/ai-literacy-series-breaking-barriers-building-a-more-robust-tech-talent-pipeline-with-break-through-tech-with-judith-spitz/">Episode Blog Post</a></li><li><a href="https://www.breakthroughtech.org/">Break Through Tech</a></li><li><a href="https://www.breakthroughtech.org/app/uploads/2022/11/BTT_report_sprinternship_FINAL_10.12.22-3.pdf">Achieving Greater Gender Equity in Tech</a></li></ul>]]>
      </content:encoded>
      <pubDate>Tue, 25 Mar 2025 08:14:04 -0400</pubDate>
      <author>Miriam Vogel</author>
      <enclosure url="https://media.transistor.fm/b3f15b55/ac5818a1.mp3" length="109516804" type="audio/mpeg"/>
      <itunes:author>Miriam Vogel</itunes:author>
      <itunes:duration>4559</itunes:duration>
      <itunes:summary>
        <![CDATA[<p>In this episode of <em>In AI We Trust?,</em> co-hosts Miriam Vogel and Rosalind Wiseman speak with Dr. Judith Spitz, Founder and Executive Director of Break Through Tech, who sheds light on the blind spots within the industry, and discusses how Break Through Tech is pioneering innovative programs to open doors to talented individuals. She gets more young people from a broad array of backgrounds to study technology disciplines, ensures they learn leadership and other skills critical to their success, and gets them into industry — she is single-handedly building a more robust and prepared tech ecosystem.</p><p><br>The EqualAI AI Literacy podcast series builds on <em>In AI We Trust?</em>’s global reach, focusing specifically on AI literacy. Featuring prominent leaders in the technology, education, and governance fields, this special series will provide listeners with valuable insights and discussions around AI’s impact on society, leading efforts in this area of AI literacy, and how listeners can benefit from these experts and tools.</p><p>Related Resources</p><ul><li><a href="https://www.equalai.org/blog/podcast/ai-literacy-series-breaking-barriers-building-a-more-robust-tech-talent-pipeline-with-break-through-tech-with-judith-spitz/">Episode Blog Post</a></li><li><a href="https://www.breakthroughtech.org/">Break Through Tech</a></li><li><a href="https://www.breakthroughtech.org/app/uploads/2022/11/BTT_report_sprinternship_FINAL_10.12.22-3.pdf">Achieving Greater Gender Equity in Tech</a></li></ul>]]>
      </itunes:summary>
      <itunes:keywords>technology, AI Governance, AI, Tech, Artificial Intelligence, AI literacy, AI Policy</itunes:keywords>
      <itunes:explicit>No</itunes:explicit>
    </item>
    <item>
      <title>AI Literacy Series Ep. 4: Mason Grimshaw on AI Literacy and Data Sovereignty for Indigenous Communities</title>
      <itunes:episode>104</itunes:episode>
      <podcast:episode>104</podcast:episode>
      <itunes:title>AI Literacy Series Ep. 4: Mason Grimshaw on AI Literacy and Data Sovereignty for Indigenous Communities</itunes:title>
      <itunes:episodeType>full</itunes:episodeType>
      <guid isPermaLink="false">48e56026-d14f-4ed0-b248-282a877ddefd</guid>
      <link>https://share.transistor.fm/s/1231e2ef</link>
      <description>
        <![CDATA[<p>Co-hosts of EqualAI’s AI Literacy Series, Miriam Vogel and Rosalind Wiseman are joined by Mason Grimshaw, data scientist at Ode Partners and VP at IndigiGenius. Grimshaw discusses his roots growing up on a reservation, and what led him to the field of AI. He explains why it’s his mission to bring AI education and tools back to his community. Grimshaw articulates how AI literacy is essential for Indigenous communities to ensure they retain data sovereignty and benefit from these game-changing tools.</p><p>The EqualAI AI Literacy podcast series builds on <em>In AI We Trust?</em>’s global reach, focusing specifically on AI literacy. Featuring prominent leaders in the technology, education, and governance fields, this special series will provide listeners with valuable insights and discussions around AI’s impact on society, leading efforts in this area of AI literacy, and how listeners can benefit from these experts and tools.</p><p>Related Resources</p><ul><li><a href="https://www.equalai.org/blog/podcast/ai-literacy-series-ai-literacy-and-data-sovereignty-for-indigenous-communities-with-mason-grimshaw/">Episode Blog Post</a></li><li><a href="https://lakota.aicode.camp/">Lakota AI Code Camp</a></li><li><a href="https://www.indigigenius.org/">IndigiGenius</a></li><li><a href="https://www.indigigenius.org/first-languages-ai-reality">First Languages AI Reality (FLAIR)</a></li></ul>]]>
      </description>
      <content:encoded>
        <![CDATA[<p>Co-hosts of EqualAI’s AI Literacy Series, Miriam Vogel and Rosalind Wiseman are joined by Mason Grimshaw, data scientist at Ode Partners and VP at IndigiGenius. Grimshaw discusses his roots growing up on a reservation, and what led him to the field of AI. He explains why it’s his mission to bring AI education and tools back to his community. Grimshaw articulates how AI literacy is essential for Indigenous communities to ensure they retain data sovereignty and benefit from these game-changing tools.</p><p>The EqualAI AI Literacy podcast series builds on <em>In AI We Trust?</em>’s global reach, focusing specifically on AI literacy. Featuring prominent leaders in the technology, education, and governance fields, this special series will provide listeners with valuable insights and discussions around AI’s impact on society, leading efforts in this area of AI literacy, and how listeners can benefit from these experts and tools.</p><p>Related Resources</p><ul><li><a href="https://www.equalai.org/blog/podcast/ai-literacy-series-ai-literacy-and-data-sovereignty-for-indigenous-communities-with-mason-grimshaw/">Episode Blog Post</a></li><li><a href="https://lakota.aicode.camp/">Lakota AI Code Camp</a></li><li><a href="https://www.indigigenius.org/">IndigiGenius</a></li><li><a href="https://www.indigigenius.org/first-languages-ai-reality">First Languages AI Reality (FLAIR)</a></li></ul>]]>
      </content:encoded>
      <pubDate>Tue, 11 Mar 2025 12:58:02 -0400</pubDate>
      <author>Miriam Vogel</author>
      <enclosure url="https://media.transistor.fm/1231e2ef/57c8bad9.mp3" length="81124141" type="audio/mpeg"/>
      <itunes:author>Miriam Vogel</itunes:author>
      <itunes:duration>3379</itunes:duration>
      <itunes:summary>
        <![CDATA[<p>Co-hosts of EqualAI’s AI Literacy Series, Miriam Vogel and Rosalind Wiseman are joined by Mason Grimshaw, data scientist at Ode Partners and VP at IndigiGenius. Grimshaw discusses his roots growing up on a reservation, and what led him to the field of AI. He explains why it’s his mission to bring AI education and tools back to his community. Grimshaw articulates how AI literacy is essential for Indigenous communities to ensure they retain data sovereignty and benefit from these game-changing tools.</p><p>The EqualAI AI Literacy podcast series builds on <em>In AI We Trust?</em>’s global reach, focusing specifically on AI literacy. Featuring prominent leaders in the technology, education, and governance fields, this special series will provide listeners with valuable insights and discussions around AI’s impact on society, leading efforts in this area of AI literacy, and how listeners can benefit from these experts and tools.</p><p>Related Resources</p><ul><li><a href="https://www.equalai.org/blog/podcast/ai-literacy-series-ai-literacy-and-data-sovereignty-for-indigenous-communities-with-mason-grimshaw/">Episode Blog Post</a></li><li><a href="https://lakota.aicode.camp/">Lakota AI Code Camp</a></li><li><a href="https://www.indigigenius.org/">IndigiGenius</a></li><li><a href="https://www.indigigenius.org/first-languages-ai-reality">First Languages AI Reality (FLAIR)</a></li></ul>]]>
      </itunes:summary>
      <itunes:keywords>technology, AI Governance, AI, Tech, Artificial Intelligence, AI literacy, AI Policy</itunes:keywords>
      <itunes:explicit>No</itunes:explicit>
    </item>
    <item>
      <title>AI Literacy Series Ep. 3: danah boyd on Thinking Critically about the Systems That Shape Us</title>
      <itunes:episode>103</itunes:episode>
      <podcast:episode>103</podcast:episode>
      <itunes:title>AI Literacy Series Ep. 3: danah boyd on Thinking Critically about the Systems That Shape Us</itunes:title>
      <itunes:episodeType>full</itunes:episodeType>
      <guid isPermaLink="false">b85f60a5-e29b-4b88-91cd-4c88aa092580</guid>
      <link>https://share.transistor.fm/s/0457ee96</link>
      <description>
        <![CDATA[<p>Co-hosts of EqualAI’s AI Literacy Series, Miriam Vogel and Rosalind Wiseman sit down with danah boyd, Partner Researcher at Microsoft Research, visiting distinguished professor at Georgetown, and founder of Data &amp; Society Research Institute, to explore how AI is reshaping education, social structures, and power dynamics. boyd challenges common assumptions about AI, urging us to move beyond simplistic narratives of good vs. bad and instead ask: Who is designing these systems? What are their limitations? And what kind of future are we building with them?</p><p><br>The EqualAI AI Literacy podcast series builds on <em>In AI We Trust?</em>’s global reach, focusing specifically on AI literacy. Featuring prominent leaders in the technology, education, and governance fields, this special series will provide listeners with valuable insights and discussions around AI’s impact on society, leading efforts in this area of AI literacy, and how listeners can benefit from these experts and tools.</p><p>Related Resources</p><ul><li><a href="https://www.equalai.org/blog/podcast/ai-literacy-series-thinking-critically-about-the-systems-that-shape-us-with-danah-boyd/">Episode Blog Post</a></li></ul>]]>
      </description>
      <content:encoded>
        <![CDATA[<p>Co-hosts of EqualAI’s AI Literacy Series, Miriam Vogel and Rosalind Wiseman sit down with danah boyd, Partner Researcher at Microsoft Research, visiting distinguished professor at Georgetown, and founder of Data &amp; Society Research Institute, to explore how AI is reshaping education, social structures, and power dynamics. boyd challenges common assumptions about AI, urging us to move beyond simplistic narratives of good vs. bad and instead ask: Who is designing these systems? What are their limitations? And what kind of future are we building with them?</p><p><br>The EqualAI AI Literacy podcast series builds on <em>In AI We Trust?</em>’s global reach, focusing specifically on AI literacy. Featuring prominent leaders in the technology, education, and governance fields, this special series will provide listeners with valuable insights and discussions around AI’s impact on society, leading efforts in this area of AI literacy, and how listeners can benefit from these experts and tools.</p><p>Related Resources</p><ul><li><a href="https://www.equalai.org/blog/podcast/ai-literacy-series-thinking-critically-about-the-systems-that-shape-us-with-danah-boyd/">Episode Blog Post</a></li></ul>]]>
      </content:encoded>
      <pubDate>Thu, 27 Feb 2025 11:09:10 -0500</pubDate>
      <author>Miriam Vogel</author>
      <enclosure url="https://media.transistor.fm/0457ee96/9c793b66.mp3" length="108705347" type="audio/mpeg"/>
      <itunes:author>Miriam Vogel</itunes:author>
      <itunes:duration>4524</itunes:duration>
      <itunes:summary>
        <![CDATA[<p>Co-hosts of EqualAI’s AI Literacy Series, Miriam Vogel and Rosalind Wiseman sit down with danah boyd, Partner Researcher at Microsoft Research, visiting distinguished professor at Georgetown, and founder of Data &amp; Society Research Institute, to explore how AI is reshaping education, social structures, and power dynamics. boyd challenges common assumptions about AI, urging us to move beyond simplistic narratives of good vs. bad and instead ask: Who is designing these systems? What are their limitations? And what kind of future are we building with them?</p><p><br>The EqualAI AI Literacy podcast series builds on <em>In AI We Trust?</em>’s global reach, focusing specifically on AI literacy. Featuring prominent leaders in the technology, education, and governance fields, this special series will provide listeners with valuable insights and discussions around AI’s impact on society, leading efforts in this area of AI literacy, and how listeners can benefit from these experts and tools.</p><p>Related Resources</p><ul><li><a href="https://www.equalai.org/blog/podcast/ai-literacy-series-thinking-critically-about-the-systems-that-shape-us-with-danah-boyd/">Episode Blog Post</a></li></ul>]]>
      </itunes:summary>
      <itunes:keywords>technology, AI Governance, AI, Tech, Artificial Intelligence, AI literacy, AI Policy</itunes:keywords>
      <itunes:explicit>No</itunes:explicit>
    </item>
    <item>
      <title>AI Literacy Series Ep. 2 with Dewey Murdick (CSET): Centering People in AI’s Progress</title>
      <itunes:episode>102</itunes:episode>
      <podcast:episode>102</podcast:episode>
      <itunes:title>AI Literacy Series Ep. 2 with Dewey Murdick (CSET): Centering People in AI’s Progress</itunes:title>
      <itunes:episodeType>full</itunes:episodeType>
      <guid isPermaLink="false">8c7eeddc-c83c-4084-a391-6ba51c13468d</guid>
      <link>https://share.transistor.fm/s/a54ec316</link>
      <description>
        <![CDATA[<p>In this episode of EqualAI’s AI Literacy Series, co-hosts Miriam Vogel and Rosalind Wiseman sit down with AI policy expert Dewey Murdick, Executive Director at Georgetown's Center for Security and Emerging Technology (CSET), who shares his hopes for AI’s role in personal development and other key areas of society. From national security to education, Murdick unpacks the policies and international collaboration needed to ensure AI serves humanity first.</p><p>The EqualAI AI Literacy podcast series builds on <em>In AI We Trust?</em>’s global reach, focusing specifically on AI literacy. Featuring prominent leaders in the technology, education, and governance fields, this special series will provide listeners with valuable insights and discussions around AI’s impact on society, leading efforts in this area of AI literacy, and how listeners can benefit from these experts and tools.</p><p>Related Resources</p><ul><li><a href="https://www.equalai.org/blog/podcast/ai-literacy-series-building-trust-and-understanding-in-ai-with-dewey-murdick/">Episode Blog Post</a></li><li><a href="https://cset.georgetown.edu/publication/enabling-principles-for-ai-governance/">Enabling Principles for AI Governance</a></li></ul>]]>
      </description>
      <content:encoded>
        <![CDATA[<p>In this episode of EqualAI’s AI Literacy Series, co-hosts Miriam Vogel and Rosalind Wiseman sit down with AI policy expert Dewey Murdick, Executive Director at Georgetown's Center for Security and Emerging Technology (CSET), who shares his hopes for AI’s role in personal development and other key areas of society. From national security to education, Murdick unpacks the policies and international collaboration needed to ensure AI serves humanity first.</p><p>The EqualAI AI Literacy podcast series builds on <em>In AI We Trust?</em>’s global reach, focusing specifically on AI literacy. Featuring prominent leaders in the technology, education, and governance fields, this special series will provide listeners with valuable insights and discussions around AI’s impact on society, leading efforts in this area of AI literacy, and how listeners can benefit from these experts and tools.</p><p>Related Resources</p><ul><li><a href="https://www.equalai.org/blog/podcast/ai-literacy-series-building-trust-and-understanding-in-ai-with-dewey-murdick/">Episode Blog Post</a></li><li><a href="https://cset.georgetown.edu/publication/enabling-principles-for-ai-governance/">Enabling Principles for AI Governance</a></li></ul>]]>
      </content:encoded>
      <pubDate>Tue, 11 Feb 2025 12:21:56 -0500</pubDate>
      <author>Miriam Vogel</author>
      <enclosure url="https://media.transistor.fm/a54ec316/cd82ffc7.mp3" length="38500474" type="audio/mpeg"/>
      <itunes:author>Miriam Vogel</itunes:author>
      <itunes:duration>2405</itunes:duration>
      <itunes:summary>
        <![CDATA[<p>In this episode of EqualAI’s AI Literacy Series, co-hosts Miriam Vogel and Rosalind Wiseman sit down with AI policy expert Dewey Murdick, Executive Director at Georgetown's Center for Security and Emerging Technology (CSET), who shares his hopes for AI’s role in personal development and other key areas of society. From national security to education, Murdick unpacks the policies and international collaboration needed to ensure AI serves humanity first.</p><p>The EqualAI AI Literacy podcast series builds on <em>In AI We Trust?</em>’s global reach, focusing specifically on AI literacy. Featuring prominent leaders in the technology, education, and governance fields, this special series will provide listeners with valuable insights and discussions around AI’s impact on society, leading efforts in this area of AI literacy, and how listeners can benefit from these experts and tools.</p><p>Related Resources</p><ul><li><a href="https://www.equalai.org/blog/podcast/ai-literacy-series-building-trust-and-understanding-in-ai-with-dewey-murdick/">Episode Blog Post</a></li><li><a href="https://cset.georgetown.edu/publication/enabling-principles-for-ai-governance/">Enabling Principles for AI Governance</a></li></ul>]]>
      </itunes:summary>
      <itunes:keywords>AI Literacy, Artificial Intelligence, AI, National Security, AI policy, Tech, AI governance</itunes:keywords>
      <itunes:explicit>No</itunes:explicit>
    </item>
    <item>
      <title>AI Literacy Series Ep. 1: What is AI and Why Are We Afraid of It?</title>
      <itunes:episode>101</itunes:episode>
      <podcast:episode>101</podcast:episode>
      <itunes:title>AI Literacy Series Ep. 1: What is AI and Why Are We Afraid of It?</itunes:title>
      <itunes:episodeType>full</itunes:episodeType>
      <guid isPermaLink="false">e8820120-0ac3-4066-b612-16504a835cd1</guid>
      <link>https://share.transistor.fm/s/0e5e4863</link>
      <description>
        <![CDATA[<p>Miriam Vogel and Rosalind Wiseman break down the basics, the limitations, the power, and the fear surrounding AI – and how you can transform it from a concept to a tool in the first episode of the <em>In</em> <em>AI We Trust? </em>AI Literacy series.</p><p>The EqualAI AI Literacy podcast series builds on <em>In AI We Trust?</em>’s global reach, focusing specifically on AI literacy. Featuring prominent leaders in the technology, education, and governance fields, this special series will provide listeners with valuable insights and discussions around AI’s impact on society, who is leading in this area of AI literacy, and how listeners can benefit from these experts.</p><p>Related Resources</p><ul><li><a href="https://www.equalai.org/blog/podcast/the-ai-literacy-project-what-is-ai-and-why-are-we-afraid-of-it/">Episode Blog Post</a></li><li><a href="https://www.equalai.org/equalai-ai-literacy-initiative/">EqualAI's AI Literacy Initiative</a></li></ul>]]>
      </description>
      <content:encoded>
        <![CDATA[<p>Miriam Vogel and Rosalind Wiseman break down the basics, the limitations, the power, and the fear surrounding AI – and how you can transform it from a concept to a tool in the first episode of the <em>In</em> <em>AI We Trust? </em>AI Literacy series.</p><p>The EqualAI AI Literacy podcast series builds on <em>In AI We Trust?</em>’s global reach, focusing specifically on AI literacy. Featuring prominent leaders in the technology, education, and governance fields, this special series will provide listeners with valuable insights and discussions around AI’s impact on society, who is leading in this area of AI literacy, and how listeners can benefit from these experts.</p><p>Related Resources</p><ul><li><a href="https://www.equalai.org/blog/podcast/the-ai-literacy-project-what-is-ai-and-why-are-we-afraid-of-it/">Episode Blog Post</a></li><li><a href="https://www.equalai.org/equalai-ai-literacy-initiative/">EqualAI's AI Literacy Initiative</a></li></ul>]]>
      </content:encoded>
      <pubDate>Wed, 29 Jan 2025 13:17:38 -0500</pubDate>
      <author>Miriam Vogel</author>
      <enclosure url="https://media.transistor.fm/0e5e4863/93240e99.mp3" length="37087840" type="audio/mpeg"/>
      <itunes:author>Miriam Vogel</itunes:author>
      <itunes:duration>1545</itunes:duration>
      <itunes:summary>
        <![CDATA[<p>Miriam Vogel and Rosalind Wiseman break down the basics, the limitations, the power, and the fear surrounding AI – and how you can transform it from a concept to a tool in the first episode of the <em>In</em> <em>AI We Trust? </em>AI Literacy series.</p><p>The EqualAI AI Literacy podcast series builds on <em>In AI We Trust?</em>’s global reach, focusing specifically on AI literacy. Featuring prominent leaders in the technology, education, and governance fields, this special series will provide listeners with valuable insights and discussions around AI’s impact on society, who is leading in this area of AI literacy, and how listeners can benefit from these experts.</p><p>Related Resources</p><ul><li><a href="https://www.equalai.org/blog/podcast/the-ai-literacy-project-what-is-ai-and-why-are-we-afraid-of-it/">Episode Blog Post</a></li><li><a href="https://www.equalai.org/equalai-ai-literacy-initiative/">EqualAI's AI Literacy Initiative</a></li></ul>]]>
      </itunes:summary>
      <itunes:keywords>technology, AI Governance, AI, Tech, Artificial Intelligence, AI literacy, AI Policy</itunes:keywords>
      <itunes:explicit>No</itunes:explicit>
    </item>
    <item>
      <title>Senator Mike Rounds (R-SD): Sen. Rounds' 2025 Message: Why Every Senate Committee is Talking AI This Congress</title>
      <itunes:episode>100</itunes:episode>
      <podcast:episode>100</podcast:episode>
      <itunes:title>Senator Mike Rounds (R-SD): Sen. Rounds' 2025 Message: Why Every Senate Committee is Talking AI This Congress</itunes:title>
      <itunes:episodeType>full</itunes:episodeType>
      <guid isPermaLink="false">4430d00f-2562-4627-a939-009c3d0a088a</guid>
      <link>https://share.transistor.fm/s/887ceef6</link>
      <description>
        <![CDATA[<p>In this episode of #InAIWeTrust, Senator Mike Rounds (R-SD) discusses the transformative role of AI and the Senate’s efforts to support its innovation and development. From working to advance AI-driven health care solutions to ensuring U.S. leadership in innovation, he shares legislative priorities and insights from the Senate AI Insight Forums and underscores the importance of AI literacy and collaboration across industry, reminding us: “AI is real, it’s here, it’s not going away.”</p>]]>
      </description>
      <content:encoded>
        <![CDATA[<p>In this episode of #InAIWeTrust, Senator Mike Rounds (R-SD) discusses the transformative role of AI and the Senate’s efforts to support its innovation and development. From working to advance AI-driven health care solutions to ensuring U.S. leadership in innovation, he shares legislative priorities and insights from the Senate AI Insight Forums and underscores the importance of AI literacy and collaboration across industry, reminding us: “AI is real, it’s here, it’s not going away.”</p>]]>
      </content:encoded>
      <pubDate>Fri, 17 Jan 2025 13:40:53 -0500</pubDate>
      <author>Miriam Vogel</author>
      <enclosure url="https://media.transistor.fm/887ceef6/9a85830e.mp3" length="43738928" type="audio/mpeg"/>
      <itunes:author>Miriam Vogel</itunes:author>
      <itunes:duration>1822</itunes:duration>
      <itunes:summary>
        <![CDATA[<p>In this episode of #InAIWeTrust, Senator Mike Rounds (R-SD) discusses the transformative role of AI and the Senate’s efforts to support its innovation and development. From working to advance AI-driven health care solutions to ensuring U.S. leadership in innovation, he shares legislative priorities and insights from the Senate AI Insight Forums and underscores the importance of AI literacy and collaboration across industry, reminding us: “AI is real, it’s here, it’s not going away.”</p>]]>
      </itunes:summary>
      <itunes:keywords>technology, AI Governance, AI, Tech, Artificial Intelligence, AI literacy, AI Policy</itunes:keywords>
      <itunes:explicit>No</itunes:explicit>
    </item>
    <item>
      <title>Vilas Dhar (McGovern Foundation): AI for the people and by the people: Year-in-Review and 2025 Predictions</title>
      <itunes:episode>99</itunes:episode>
      <podcast:episode>99</podcast:episode>
      <itunes:title>Vilas Dhar (McGovern Foundation): AI for the people and by the people: Year-in-Review and 2025 Predictions</itunes:title>
      <itunes:episodeType>full</itunes:episodeType>
      <guid isPermaLink="false">2b1a96ca-0437-40d3-ad15-d9d91b81639a</guid>
      <link>https://share.transistor.fm/s/0e97e529</link>
      <description>
        <![CDATA[<p>In this 2024 year-end episode of <em>In AI We Trust?</em>, Vilas Dhar of the Patrick J. McGovern Foundation and Miriam Vogel of EqualAI review 2024 and discuss predictions for the year ahead. </p>]]>
      </description>
      <content:encoded>
        <![CDATA[<p>In this 2024 year-end episode of <em>In AI We Trust?</em>, Vilas Dhar of the Patrick J. McGovern Foundation and Miriam Vogel of EqualAI review 2024 and discuss predictions for the year ahead. </p>]]>
      </content:encoded>
      <pubDate>Fri, 20 Dec 2024 05:00:00 -0500</pubDate>
      <author>Miriam Vogel</author>
      <enclosure url="https://media.transistor.fm/0e97e529/74429f4c.mp3" length="35116067" type="audio/mpeg"/>
      <itunes:author>Miriam Vogel</itunes:author>
      <itunes:duration>2194</itunes:duration>
      <itunes:summary>
        <![CDATA[<p>In this 2024 year-end episode of <em>In AI We Trust?</em>, Vilas Dhar of the Patrick J. McGovern Foundation and Miriam Vogel of EqualAI review 2024 and discuss predictions for the year ahead. </p>]]>
      </itunes:summary>
      <itunes:keywords>technology, AI Governance, AI, Tech, Artificial Intelligence, AI literacy, AI Policy</itunes:keywords>
      <itunes:explicit>No</itunes:explicit>
    </item>
    <item>
      <title>Elizabeth Kelly (AISI): How will the US AI Safety Institute lead the US and globe in AI safety? </title>
      <itunes:episode>98</itunes:episode>
      <podcast:episode>98</podcast:episode>
      <itunes:title>Elizabeth Kelly (AISI): How will the US AI Safety Institute lead the US and globe in AI safety? </itunes:title>
      <itunes:episodeType>full</itunes:episodeType>
      <guid isPermaLink="false">347c40c9-5fed-4107-9d02-bf444d13e26b</guid>
      <link>https://share.transistor.fm/s/fa475510</link>
      <description>
        <![CDATA[<p>In this episode of #InAIWeTrust, Elizabeth Kelly, director of the U.S. Artificial Intelligence Safety Institute (AISI), explains the significance of last week’s National Security Memorandum (NSM) on AI, shares her experience working on the Biden Executive Order on AI, and provides insight into the US AISI, including: recent guidance for companies to mitigate AI risks, partnerships with Anthropic and OpenAI, and the upcoming inaugural convening of the International Network of AI Safety Institutes. </p>]]>
      </description>
      <content:encoded>
        <![CDATA[<p>In this episode of #InAIWeTrust, Elizabeth Kelly, director of the U.S. Artificial Intelligence Safety Institute (AISI), explains the significance of last week’s National Security Memorandum (NSM) on AI, shares her experience working on the Biden Executive Order on AI, and provides insight into the US AISI, including: recent guidance for companies to mitigate AI risks, partnerships with Anthropic and OpenAI, and the upcoming inaugural convening of the International Network of AI Safety Institutes. </p>]]>
      </content:encoded>
      <pubDate>Thu, 31 Oct 2024 05:00:00 -0400</pubDate>
      <author>Miriam Vogel</author>
      <enclosure url="https://media.transistor.fm/fa475510/8654b018.mp3" length="38073982" type="audio/mpeg"/>
      <itunes:author>Miriam Vogel</itunes:author>
      <itunes:duration>1586</itunes:duration>
      <itunes:summary>
        <![CDATA[<p>In this episode of #InAIWeTrust, Elizabeth Kelly, director of the U.S. Artificial Intelligence Safety Institute (AISI), explains the significance of last week’s National Security Memorandum (NSM) on AI, shares her experience working on the Biden Executive Order on AI, and provides insight into the US AISI, including: recent guidance for companies to mitigate AI risks, partnerships with Anthropic and OpenAI, and the upcoming inaugural convening of the International Network of AI Safety Institutes. </p>]]>
      </itunes:summary>
      <itunes:keywords>technology, AI Governance, AI, Tech, Artificial Intelligence, AI literacy, AI Policy</itunes:keywords>
      <itunes:explicit>No</itunes:explicit>
    </item>
    <item>
      <title>Michael Chertoff (Chertoff Group) and Miriam Vogel (EqualAI): Is your AI use violating the law?</title>
      <itunes:episode>97</itunes:episode>
      <podcast:episode>97</podcast:episode>
      <itunes:title>Michael Chertoff (Chertoff Group) and Miriam Vogel (EqualAI): Is your AI use violating the law?</itunes:title>
      <itunes:episodeType>full</itunes:episodeType>
      <guid isPermaLink="false">76f2e52a-bc1c-4f69-942f-b1259ebdd51a</guid>
      <link>https://share.transistor.fm/s/dabd8e56</link>
      <description>
        <![CDATA[<p>In this special edition of #InAIWeTrust?, EqualAI President and CEO Miriam Vogel and former Secretary of Homeland Security Michael Chertoff sit down to discuss their recent co-authored paper, <em>Is Your Use of AI Violating the Law? An Overview of the Current Legal Landscape.</em> Special guest Victoria Espinel, CEO of BSA | The Software Alliance, moderates the conversation with the co-authors to explore key findings, current laws on the books, and potential liabilities from AI deployment and use that lawyers, executives, judges, and policy makers need to understand in our increasingly AI-driven world. </p><p><br></p><p>The article can be found on our website <a href="https://www.equalai.org/wp-content/uploads/2024/09/Vogel_et_al_Sep_13_2024.pdf">here</a>.</p><p>Read the Axios exclusive <a href="https://www.axios.com/2024/09/16/companies-liability-ai-nyu-law-journal">here</a>.</p>]]>
      </description>
      <content:encoded>
        <![CDATA[<p>In this special edition of #InAIWeTrust?, EqualAI President and CEO Miriam Vogel and former Secretary of Homeland Security Michael Chertoff sit down to discuss their recent co-authored paper, <em>Is Your Use of AI Violating the Law? An Overview of the Current Legal Landscape.</em> Special guest Victoria Espinel, CEO of BSA | The Software Alliance, moderates the conversation with the co-authors to explore key findings, current laws on the books, and potential liabilities from AI deployment and use that lawyers, executives, judges, and policy makers need to understand in our increasingly AI-driven world. </p><p><br></p><p>The article can be found on our website <a href="https://www.equalai.org/wp-content/uploads/2024/09/Vogel_et_al_Sep_13_2024.pdf">here</a>.</p><p>Read the Axios exclusive <a href="https://www.axios.com/2024/09/16/companies-liability-ai-nyu-law-journal">here</a>.</p>]]>
      </content:encoded>
      <pubDate>Thu, 17 Oct 2024 05:00:00 -0400</pubDate>
      <author>Miriam Vogel</author>
      <enclosure url="https://media.transistor.fm/dabd8e56/9e2ecf4a.mp3" length="27768752" type="audio/mpeg"/>
      <itunes:author>Miriam Vogel</itunes:author>
      <itunes:duration>1735</itunes:duration>
      <itunes:summary>
        <![CDATA[<p>In this special edition of #InAIWeTrust?, EqualAI President and CEO Miriam Vogel and former Secretary of Homeland Security Michael Chertoff sit down to discuss their recent co-authored paper, <em>Is Your Use of AI Violating the Law? An Overview of the Current Legal Landscape.</em> Special guest Victoria Espinel, CEO of BSA | The Software Alliance, moderates the conversation with the co-authors to explore key findings, current laws on the books, and potential liabilities from AI deployment and use that lawyers, executives, judges, and policy makers need to understand in our increasingly AI-driven world. </p><p><br></p><p>The article can be found on our website <a href="https://www.equalai.org/wp-content/uploads/2024/09/Vogel_et_al_Sep_13_2024.pdf">here</a>.</p><p>Read the Axios exclusive <a href="https://www.axios.com/2024/09/16/companies-liability-ai-nyu-law-journal">here</a>.</p>]]>
      </itunes:summary>
      <itunes:keywords>technology, AI Governance, AI, Tech, Artificial Intelligence, AI literacy, AI Policy</itunes:keywords>
      <itunes:explicit>No</itunes:explicit>
    </item>
    <item>
      <title>Dr. Brennan Spiegel (Cedars-Sinai): AI in healthcare: Will AI help humans to thrive?</title>
      <itunes:episode>96</itunes:episode>
      <podcast:episode>96</podcast:episode>
      <itunes:title>Dr. Brennan Spiegel (Cedars-Sinai): AI in healthcare: Will AI help humans to thrive?</itunes:title>
      <itunes:episodeType>full</itunes:episodeType>
      <guid isPermaLink="false">a88e5d1d-251f-4d8c-b09e-b54160c2103d</guid>
      <link>https://share.transistor.fm/s/3ac435cd</link>
      <description>
        <![CDATA[<p>In this episode of #InAIWeTrust, Dr. Brennan Spiegel, Cedars-Sinai Director of Health Services Research and Chair of Digital Health Ethics, discusses his use of AI for increased efficiencies and to improve patient care, including co-founding Xaia, an AI mental health tool. He talks about the importance of human-centered design and how AI can enable doctors to better serve and care for patients. </p><p><br></p>]]>
      </description>
      <content:encoded>
        <![CDATA[<p>In this episode of #InAIWeTrust, Dr. Brennan Spiegel, Cedars-Sinai Director of Health Services Research and Chair of Digital Health Ethics, discusses his use of AI for increased efficiencies and to improve patient care, including co-founding Xaia, an AI mental health tool. He talks about the importance of human-centered design and how AI can enable doctors to better serve and care for patients. </p><p><br></p>]]>
      </content:encoded>
      <pubDate>Thu, 03 Oct 2024 05:00:00 -0400</pubDate>
      <author>Miriam Vogel</author>
      <enclosure url="https://media.transistor.fm/3ac435cd/de1e7da3.mp3" length="21793596" type="audio/mpeg"/>
      <itunes:author>Miriam Vogel</itunes:author>
      <itunes:duration>1361</itunes:duration>
      <itunes:summary>
        <![CDATA[<p>In this episode of #InAIWeTrust, Dr. Brennan Spiegel, Cedars-Sinai Director of Health Services Research and Chair of Digital Health Ethics, discusses his use of AI for increased efficiencies and to improve patient care, including co-founding Xaia, an AI mental health tool. He talks about the importance of human-centered design and how AI can enable doctors to better serve and care for patients. </p><p><br></p>]]>
      </itunes:summary>
      <itunes:keywords>technology, AI Governance, AI, Tech, Artificial Intelligence, AI literacy, AI Policy</itunes:keywords>
      <itunes:explicit>No</itunes:explicit>
    </item>
    <item>
      <title>Russell Wald (HAI): Innovating for the future - Can academia bring the next wave of AI innovation and train our future generations? </title>
      <itunes:episode>95</itunes:episode>
      <podcast:episode>95</podcast:episode>
      <itunes:title>Russell Wald (HAI): Innovating for the future - Can academia bring the next wave of AI innovation and train our future generations? </itunes:title>
      <itunes:episodeType>full</itunes:episodeType>
      <guid isPermaLink="false">c6b7645e-ff5d-4df6-b04f-8e1510957e1b</guid>
      <link>https://share.transistor.fm/s/6e360e99</link>
      <description>
        <![CDATA[<p>In this episode, Russell Wald, Deputy Director at the Stanford Institute for Human-Centered Artificial Intelligence (HAI), underscores the importance of academic research around AI, key lessons from the AI Index Report, the need for uniform AI benchmarks, and the value of AI education for policy makers.</p><p><br></p><p>Resources mentioned in this episode:</p><p><a href="https://aiindex.stanford.edu/report/">2024 AI Index Report</a></p><p><a href="https://hai.stanford.edu/news/ai-trial-legal-models-hallucinate-1-out-6-or-more-benchmarking-queries">AI on Trial: Legal Models Hallucinate in 1 out of 6 (or More) Benchmarking Queries</a></p>]]>
      </description>
      <content:encoded>
        <![CDATA[<p>In this episode, Russell Wald, Deputy Director at the Stanford Institute for Human-Centered Artificial Intelligence (HAI), underscores the importance of academic research around AI, key lessons from the AI Index Report, the need for uniform AI benchmarks, and the value of AI education for policy makers.</p><p><br></p><p>Resources mentioned in this episode:</p><p><a href="https://aiindex.stanford.edu/report/">2024 AI Index Report</a></p><p><a href="https://hai.stanford.edu/news/ai-trial-legal-models-hallucinate-1-out-6-or-more-benchmarking-queries">AI on Trial: Legal Models Hallucinate in 1 out of 6 (or More) Benchmarking Queries</a></p>]]>
      </content:encoded>
      <pubDate>Tue, 30 Jul 2024 05:00:00 -0400</pubDate>
      <author>Miriam Vogel</author>
      <enclosure url="https://media.transistor.fm/6e360e99/6063a233.mp3" length="39757948" type="audio/mpeg"/>
      <itunes:author>Miriam Vogel</itunes:author>
      <itunes:duration>2484</itunes:duration>
      <itunes:summary>
        <![CDATA[<p>In this episode, Russell Wald, Deputy Director at the Stanford Institute for Human-Centered Artificial Intelligence (HAI), underscores the importance of academic research around AI, key lessons from the AI Index Report, the need for uniform AI benchmarks, and the value of AI education for policy makers.</p><p><br></p><p>Resources mentioned in this episode:</p><p><a href="https://aiindex.stanford.edu/report/">2024 AI Index Report</a></p><p><a href="https://hai.stanford.edu/news/ai-trial-legal-models-hallucinate-1-out-6-or-more-benchmarking-queries">AI on Trial: Legal Models Hallucinate in 1 out of 6 (or More) Benchmarking Queries</a></p>]]>
      </itunes:summary>
      <itunes:keywords>technology, AI Governance, AI, Tech, Artificial Intelligence, AI literacy, AI Policy</itunes:keywords>
      <itunes:explicit>No</itunes:explicit>
    </item>
    <item>
      <title>Dmitri Alperovitch (Silverado Policy Accelerator): The role of AI in “Cold War II”?</title>
      <itunes:episode>94</itunes:episode>
      <podcast:episode>94</podcast:episode>
      <itunes:title>Dmitri Alperovitch (Silverado Policy Accelerator): The role of AI in “Cold War II”?</itunes:title>
      <itunes:episodeType>full</itunes:episodeType>
      <guid isPermaLink="false">2c249ce1-7215-4803-bba5-1ab5b0e156a5</guid>
      <link>https://share.transistor.fm/s/95922450</link>
      <description>
        <![CDATA[<p>Dmitri Alperovitch, Co-Founder and Chairman of Silverado Policy Accelerator and Co-Founder of CrowdStrike, joins this week’s episode of #InAIWeTrust to share his view that we are in the “Second Cold War” with China, the role of AI in this battle as well as in bio tech and other key sectors, and the role of government in this arena. </p><p>To hear more from Dmitri, tune into his podcast Geopolitics Decanted: <a href="https://podcast.silverado.org/episodes">https://podcast.silverado.org/episodes</a> </p><p><br></p><p>Dmitri's new book, "World on the Brink: How America Can Beat China in the Race for the Twenty-First Century" can be found here: <a href="https://www.amazon.com/dp/B0CF1TKHY2">https://www.amazon.com/dp/B0CF1TKHY2</a> </p>]]>
      </description>
      <content:encoded>
        <![CDATA[<p>Dmitri Alperovitch, Co-Founder and Chairman of Silverado Policy Accelerator and Co-Founder of CrowdStrike, joins this week’s episode of #InAIWeTrust to share his view that we are in the “Second Cold War” with China, the role of AI in this battle as well as in bio tech and other key sectors, and the role of government in this arena. </p><p>To hear more from Dmitri, tune into his podcast Geopolitics Decanted: <a href="https://podcast.silverado.org/episodes">https://podcast.silverado.org/episodes</a> </p><p><br></p><p>Dmitri's new book, "World on the Brink: How America Can Beat China in the Race for the Twenty-First Century" can be found here: <a href="https://www.amazon.com/dp/B0CF1TKHY2">https://www.amazon.com/dp/B0CF1TKHY2</a> </p>]]>
      </content:encoded>
      <pubDate>Wed, 29 May 2024 05:00:00 -0400</pubDate>
      <author>Miriam Vogel</author>
      <enclosure url="https://media.transistor.fm/95922450/2b9f455f.mp3" length="29699712" type="audio/mpeg"/>
      <itunes:author>Miriam Vogel</itunes:author>
      <itunes:duration>1856</itunes:duration>
      <itunes:summary>
        <![CDATA[<p>Dmitri Alperovitch, Co-Founder and Chairman of Silverado Policy Accelerator and Co-Founder of CrowdStrike, joins this week’s episode of #InAIWeTrust to share his view that we are in the “Second Cold War” with China, the role of AI in this battle as well as in bio tech and other key sectors, and the role of government in this arena. </p><p>To hear more from Dmitri, tune into his podcast Geopolitics Decanted: <a href="https://podcast.silverado.org/episodes">https://podcast.silverado.org/episodes</a> </p><p><br></p><p>Dmitri's new book, "World on the Brink: How America Can Beat China in the Race for the Twenty-First Century" can be found here: <a href="https://www.amazon.com/dp/B0CF1TKHY2">https://www.amazon.com/dp/B0CF1TKHY2</a> </p>]]>
      </itunes:summary>
      <itunes:keywords>technology, AI Governance, AI, Tech, Artificial Intelligence, AI literacy, AI Policy</itunes:keywords>
      <itunes:explicit>No</itunes:explicit>
    </item>
    <item>
      <title>Scott Galloway (NYU): Wealth, influence, and AI in America: Will AI be a tool of societal control and who (should be) minding the store?</title>
      <itunes:episode>93</itunes:episode>
      <podcast:episode>93</podcast:episode>
      <itunes:title>Scott Galloway (NYU): Wealth, influence, and AI in America: Will AI be a tool of societal control and who (should be) minding the store?</itunes:title>
      <itunes:episodeType>full</itunes:episodeType>
      <guid isPermaLink="false">fc2d3141-25ed-434d-8b15-88a509415212</guid>
      <link>https://share.transistor.fm/s/67e3b505</link>
      <description>
        <![CDATA[<p>Scott Galloway, professor, entrepreneur, and best-selling author, joins this week’s episode of In AI We Trust? to cover hot topics including: impact of AI on businesses as a “corporate Ozempic,” the political influence of “shallow fakes,” the dangerous threat of AI on our increasingly vulnerable and lonely population, the role of business executives and regulators in guaranteeing our safety, and the potential of AI to unlock physical and mental health care. </p><p><br>--</p><p>Resources mentioned in this episode:</p><p><a href="https://www.profgalloway.com/the-algebra-of-wealth-3/">Algebra of Wealth</a></p><p><a href="https://www.penguinrandomhouse.com/books/609305/the-algebra-of-happiness-by-scott-galloway/">Algebra of Happiness</a> </p><p><br></p>]]>
      </description>
      <content:encoded>
        <![CDATA[<p>Scott Galloway, professor, entrepreneur, and best selling author, joins this week’s episode of In AI We Trust? to cover hot topics including: impact of AI on businesses as a “corporate ozempic,” the political influence of “shallow fakes,” the dangerous threat of AI on our increasingly vulnerable and lonely population, the role of business executives and regulators in guaranteeing our safety, and the potential of AI to unlock physical and mental health care. </p><p><br>--</p><p>Resources mentioned in this episode:</p><p><a href="https://www.profgalloway.com/the-algebra-of-wealth-3/">Algebra of Wealth</a></p><p><a href="https://www.penguinrandomhouse.com/books/609305/the-algebra-of-happiness-by-scott-galloway/">Algebra of Happiness</a> </p><p><br></p>]]>
      </content:encoded>
      <pubDate>Wed, 08 May 2024 05:00:00 -0400</pubDate>
      <author>Miriam Vogel</author>
      <enclosure url="https://media.transistor.fm/67e3b505/f4356076.mp3" length="52263111" type="audio/mpeg"/>
      <itunes:author>Miriam Vogel</itunes:author>
      <itunes:duration>2175</itunes:duration>
      <itunes:summary>
        <![CDATA[<p>Scott Galloway, professor, entrepreneur, and best selling author, joins this week’s episode of In AI We Trust? to cover hot topics including: impact of AI on businesses as a “corporate ozempic,” the political influence of “shallow fakes,” the dangerous threat of AI on our increasingly vulnerable and lonely population, the role of business executives and regulators in guaranteeing our safety, and the potential of AI to unlock physical and mental health care. </p><p><br>--</p><p>Resources mentioned in this episode:</p><p><a href="https://www.profgalloway.com/the-algebra-of-wealth-3/">Algebra of Wealth</a></p><p><a href="https://www.penguinrandomhouse.com/books/609305/the-algebra-of-happiness-by-scott-galloway/">Algebra of Happiness</a> </p><p><br></p>]]>
      </itunes:summary>
      <itunes:keywords>technology, AI Governance, AI, Tech, Artificial Intelligence, AI literacy, AI Policy</itunes:keywords>
      <itunes:explicit>No</itunes:explicit>
    </item>
    <item>
      <title>Ylli Bajraktari (SCSP): Will the U.S. be AI-ready by 2030?</title>
      <itunes:episode>92</itunes:episode>
      <podcast:episode>92</podcast:episode>
      <itunes:title>Ylli Bajraktari (SCSP): Will the U.S. be AI-ready by 2030?</itunes:title>
      <itunes:episodeType>full</itunes:episodeType>
      <guid isPermaLink="false">9dfb6f35-9d4b-42d3-850c-1943e6f3368a</guid>
      <link>https://share.transistor.fm/s/cd5d47b8</link>
      <description>
        <![CDATA[<p>In this week’s episode of In AI We Trust? Ylli Bajraktari, President and CEO of the Special Competitive Studies Project, joins us to discuss the implications of AI on national security, geopolitical competition, what the US government can do to establish a foundation for success in AI leadership, and the upcoming SCSP AI expo in DC (May 7-8). </p><p><br>―</p><p>Resources mentioned in this episode:</p><p><a href="https://scsp222.substack.com/p/scsp-president-and-ceo-speaks-at?utm_source=publication-search">Ylli Bajraktari Testimony at the Second Senate AI Insight Forum </a></p><p><a href="https://scsp222.substack.com/p/the-next-chapter-in-ai">The Next Chapter in AI</a></p><p><a href="https://www.scsp.ai/wp-content/uploads/2023/12/2023-End-of-the-Year-Review.pdf">2023 Year in Review: Six Items To Watch In 2024</a></p>]]>
      </description>
      <content:encoded>
        <![CDATA[<p>In this week’s episode of In AI We Trust? Ylli Bajraktari, President and CEO of the Special Competitive Studies Project, joins us to discuss the implications of AI on national security, geopolitical competition, what the US government can do to establish a foundation for success in AI leadership, and the upcoming SCSP AI expo in DC (May 7-8). </p><p><br>―</p><p>Resources mentioned in this episode:</p><p><a href="https://scsp222.substack.com/p/scsp-president-and-ceo-speaks-at?utm_source=publication-search">Ylli Bajraktari Testimony at the Second Senate AI Insight Forum </a></p><p><a href="https://scsp222.substack.com/p/the-next-chapter-in-ai">The Next Chapter in AI</a></p><p><a href="https://www.scsp.ai/wp-content/uploads/2023/12/2023-End-of-the-Year-Review.pdf">2023 Year in Review: Six Items To Watch In 2024</a></p>]]>
      </content:encoded>
      <pubDate>Thu, 02 May 2024 04:17:12 -0400</pubDate>
      <author>Miriam Vogel</author>
      <enclosure url="https://media.transistor.fm/cd5d47b8/b1c6b801.mp3" length="55319353" type="audio/mpeg"/>
      <itunes:author>Miriam Vogel</itunes:author>
      <itunes:duration>2301</itunes:duration>
      <itunes:summary>
        <![CDATA[<p>In this week’s episode of In AI We Trust? Ylli Bajraktari, President and CEO of the Special Competitive Studies Project, joins us to discuss the implications of AI on national security, geopolitical competition, what the US government can do to establish a foundation for success in AI leadership, and the upcoming SCSP AI expo in DC (May 7-8). </p><p><br>―</p><p>Resources mentioned in this episode:</p><p><a href="https://scsp222.substack.com/p/scsp-president-and-ceo-speaks-at?utm_source=publication-search">Ylli Bajraktari Testimony at the Second Senate AI Insight Forum </a></p><p><a href="https://scsp222.substack.com/p/the-next-chapter-in-ai">The Next Chapter in AI</a></p><p><a href="https://www.scsp.ai/wp-content/uploads/2023/12/2023-End-of-the-Year-Review.pdf">2023 Year in Review: Six Items To Watch In 2024</a></p>]]>
      </itunes:summary>
      <itunes:keywords>technology, AI Governance, AI, Tech, Artificial Intelligence, AI literacy, AI Policy</itunes:keywords>
      <itunes:explicit>No</itunes:explicit>
    </item>
    <item>
      <title>K.J. Bagchi (The Leadership Conference Education Fund Center for Civil Rights and Tech) Encoding Justice: Can we protect our civil rights in the AI age?</title>
      <itunes:episode>91</itunes:episode>
      <podcast:episode>91</podcast:episode>
      <itunes:title>K.J. Bagchi (The Leadership Conference Education Fund Center for Civil Rights and Tech) Encoding Justice: Can we protect our civil rights in the AI age?</itunes:title>
      <itunes:episodeType>full</itunes:episodeType>
      <guid isPermaLink="false">0cf9a3f9-c307-49b9-b142-0d7b09c4791c</guid>
      <link>https://share.transistor.fm/s/e640f2ea</link>
      <description>
        <![CDATA[<p>In this episode of In AI We Trust?, Koustubh “K.J.” Bagchi, VP of the recently established Center for Civil Rights and Technology, founded by The Leadership Conference Education Fund, discusses the impact of AI on democracy, including deepfakes and elections; the interplay between AI and privacy; and the state of federal civil rights actions on AI.</p>]]>
      </description>
      <content:encoded>
        <![CDATA[<p>In this episode of In AI We Trust?, Koustubh “K.J.” Bagchi, VP of the recently established Center for Civil Rights and Technology, founded by The Leadership Conference Education Fund, discusses the impact of AI on democracy, including deepfakes and elections; the interplay between AI and privacy; and the state of federal civil rights actions on AI.</p>]]>
      </content:encoded>
      <pubDate>Wed, 10 Apr 2024 05:00:00 -0400</pubDate>
      <author>Miriam Vogel</author>
      <enclosure url="https://media.transistor.fm/e640f2ea/9c370511.mp3" length="48242118" type="audio/mpeg"/>
      <itunes:author>Miriam Vogel</itunes:author>
      <itunes:duration>1997</itunes:duration>
      <itunes:summary>
        <![CDATA[<p>In this episode of In AI We Trust?, Koustubh “K.J.” Bagchi, VP of the recently established Center for Civil Rights and Technology, founded by The Leadership Conference Education Fund, discusses the impact of AI on democracy, including deepfakes and elections; the interplay between AI and privacy; and the state of federal civil rights actions on AI.</p>]]>
      </itunes:summary>
      <itunes:keywords>technology, AI Governance, AI, Tech, Artificial Intelligence, AI literacy, AI Policy</itunes:keywords>
      <itunes:explicit>No</itunes:explicit>
    </item>
    <item>
      <title>Shelley Zalis, the Female Quotient (FQ): Can we achieve equality through algorithms?</title>
      <itunes:episode>90</itunes:episode>
      <podcast:episode>90</podcast:episode>
      <itunes:title>Shelley Zalis, the Female Quotient (FQ): Can we achieve equality through algorithms?</itunes:title>
      <itunes:episodeType>full</itunes:episodeType>
      <guid isPermaLink="false">13cbc210-fcda-4903-ad13-daf815677526</guid>
      <link>https://share.transistor.fm/s/f5d6cedc</link>
      <description>
        <![CDATA[<p>In this episode of In AI We Trust? Shelley Zalis, founder and CEO of The Female Quotient (FQ), joins us in celebration of Women’s History Month. Tune in to learn about the FQ’s Algorithm for Equality Manifesto, how AI can help close the gender gap and the whys of championing women in industry.  </p><p>―</p><p>Resources mentioned in this episode:<br><a href="https://thefq.thefemalequotient.com/algorithm-for-equality/">The Algorithm for Equality® Manifesto</a></p>]]>
      </description>
      <content:encoded>
        <![CDATA[<p>In this episode of In AI We Trust? Shelley Zalis, founder and CEO of The Female Quotient (FQ), joins us in celebration of Women’s History Month. Tune in to learn about the FQ’s Algorithm for Equality Manifesto, how AI can help close the gender gap and the whys of championing women in industry.  </p><p>―</p><p>Resources mentioned in this episode:<br><a href="https://thefq.thefemalequotient.com/algorithm-for-equality/">The Algorithm for Equality® Manifesto</a></p>]]>
      </content:encoded>
      <pubDate>Wed, 27 Mar 2024 05:00:00 -0400</pubDate>
      <author>Miriam Vogel</author>
      <enclosure url="https://media.transistor.fm/f5d6cedc/eb218e00.mp3" length="25949214" type="audio/mpeg"/>
      <itunes:author>Miriam Vogel</itunes:author>
      <itunes:duration>1620</itunes:duration>
      <itunes:summary>
        <![CDATA[<p>In this episode of In AI We Trust? Shelley Zalis, founder and CEO of The Female Quotient (FQ), joins us in celebration of Women’s History Month. Tune in to learn about the FQ’s Algorithm for Equality Manifesto, how AI can help close the gender gap and the whys of championing women in industry.  </p><p>―</p><p>Resources mentioned in this episode:<br><a href="https://thefq.thefemalequotient.com/algorithm-for-equality/">The Algorithm for Equality® Manifesto</a></p>]]>
      </itunes:summary>
      <itunes:keywords>technology, AI Governance, AI, Tech, Artificial Intelligence, AI literacy, AI Policy</itunes:keywords>
      <itunes:explicit>No</itunes:explicit>
    </item>
    <item>
      <title>Helen Toner (CSET): How to govern AI in the face of uncertainty?</title>
      <itunes:episode>89</itunes:episode>
      <podcast:episode>89</podcast:episode>
      <itunes:title>Helen Toner (CSET): How to govern AI in the face of uncertainty?</itunes:title>
      <itunes:episodeType>full</itunes:episodeType>
      <guid isPermaLink="false">ff14e1ad-c759-4fc9-9e93-ab7de4f6fe5a</guid>
      <link>https://share.transistor.fm/s/5be1dcbe</link>
      <description>
        <![CDATA[<p>This week Helen Toner, Director of Strategy and Foundational Research Grants at Georgetown’s Center for Security and Emerging Technology (CSET), joins In AI We Trust? to discuss decoding China’s AI policies, AI’s role in warfare, the potential impact of AI agents, challenges around regulating changing technology, and how to approach AI evaluations. </p><p>―</p><p>Resources mentioned in this episode:<br><a href="https://cset.georgetown.edu/article/regulating-the-ai-frontier-design-choices-and-constraints/">Regulating the AI Frontier: Design Choices and Constraints</a></p><p><a href="https://cset.georgetown.edu/article/will-china-set-global-tech-standards/">Will China Set Global Tech Standards?</a></p><p><a href="https://cset.georgetown.edu/article/the-rise-of-artificial-intelligence-raises-serious-concerns-for-national-security/">The rise of artificial intelligence raises serious concerns for national security</a></p>]]>
      </description>
      <content:encoded>
        <![CDATA[<p>This week Helen Toner, Director of Strategy and Foundational Research Grants at Georgetown’s Center for Security and Emerging Technology (CSET), joins In AI We Trust? to discuss decoding China’s AI policies, AI’s role in warfare, the potential impact of AI agents, challenges around regulating changing technology, and how to approach AI evaluations. </p><p>―</p><p>Resources mentioned in this episode:<br><a href="https://cset.georgetown.edu/article/regulating-the-ai-frontier-design-choices-and-constraints/">Regulating the AI Frontier: Design Choices and Constraints</a></p><p><a href="https://cset.georgetown.edu/article/will-china-set-global-tech-standards/">Will China Set Global Tech Standards?</a></p><p><a href="https://cset.georgetown.edu/article/the-rise-of-artificial-intelligence-raises-serious-concerns-for-national-security/">The rise of artificial intelligence raises serious concerns for national security</a></p>]]>
      </content:encoded>
      <pubDate>Wed, 13 Mar 2024 05:00:00 -0400</pubDate>
      <author>Miriam Vogel</author>
      <enclosure url="https://media.transistor.fm/5be1dcbe/ef41d22d.mp3" length="33687709" type="audio/mpeg"/>
      <itunes:author>Miriam Vogel</itunes:author>
      <itunes:duration>2104</itunes:duration>
      <itunes:summary>
        <![CDATA[<p>This week Helen Toner, Director of Strategy and Foundational Research Grants at Georgetown’s Center for Security and Emerging Technology (CSET), joins In AI We Trust? to discuss decoding China’s AI policies, AI’s role in warfare, the potential impact of AI agents, challenges around regulating changing technology, and how to approach AI evaluations. </p><p>―</p><p>Resources mentioned in this episode:<br><a href="https://cset.georgetown.edu/article/regulating-the-ai-frontier-design-choices-and-constraints/">Regulating the AI Frontier: Design Choices and Constraints</a></p><p><a href="https://cset.georgetown.edu/article/will-china-set-global-tech-standards/">Will China Set Global Tech Standards?</a></p><p><a href="https://cset.georgetown.edu/article/the-rise-of-artificial-intelligence-raises-serious-concerns-for-national-security/">The rise of artificial intelligence raises serious concerns for national security</a></p>]]>
      </itunes:summary>
      <itunes:keywords>technology, AI Governance, AI, Tech, Artificial Intelligence, AI literacy, AI Policy</itunes:keywords>
      <itunes:explicit>No</itunes:explicit>
    </item>
    <item>
      <title>Micky Tripathi (HHS): Is AI good for your health?: How HHS is approaching AI use to support innovation and reduce harms &amp; inefficiencies in our health care system.</title>
      <itunes:episode>88</itunes:episode>
      <podcast:episode>88</podcast:episode>
      <itunes:title>Micky Tripathi (HHS): Is AI good for your health?: How HHS is approaching AI use to support innovation and reduce harms &amp; inefficiencies in our health care system.</itunes:title>
      <itunes:episodeType>full</itunes:episodeType>
      <guid isPermaLink="false">62244a21-e595-4657-8b64-e76d43a04fe9</guid>
      <link>https://share.transistor.fm/s/ec3828fe</link>
      <description>
        <![CDATA[<p>In this episode, Dr. Micky Tripathi, National Coordinator for Health Information Technology at the Department of Health and Human Services (HHS), shares how AI can improve patient care, current work at HHS to implement the WH Executive Order on AI, the potential risks that AI presents to the healthcare system and how transparency can improve AI outcomes in the healthcare space. </p>]]>
      </description>
      <content:encoded>
        <![CDATA[<p>In this episode, Dr. Micky Tripathi, National Coordinator for Health Information Technology at the Department of Health and Human Services (HHS), shares how AI can improve patient care, current work at HHS to implement the WH Executive Order on AI, the potential risks that AI presents to the healthcare system and how transparency can improve AI outcomes in the healthcare space. </p>]]>
      </content:encoded>
      <pubDate>Thu, 07 Mar 2024 05:00:00 -0500</pubDate>
      <author>Miriam Vogel</author>
      <enclosure url="https://media.transistor.fm/ec3828fe/3deb8b6d.mp3" length="34529995" type="audio/mpeg"/>
      <itunes:author>Miriam Vogel</itunes:author>
      <itunes:duration>2157</itunes:duration>
      <itunes:summary>
        <![CDATA[<p>In this episode, Dr. Micky Tripathi, National Coordinator for Health Information Technology at the Department of Health and Human Services (HHS), shares how AI can improve patient care, current work at HHS to implement the WH Executive Order on AI, the potential risks that AI presents to the healthcare system and how transparency can improve AI outcomes in the healthcare space. </p>]]>
      </itunes:summary>
      <itunes:keywords>technology, AI Governance, AI, Tech, Artificial Intelligence, AI literacy, AI Policy</itunes:keywords>
      <itunes:explicit>No</itunes:explicit>
    </item>
    <item>
      <title>Dr. Athina Kanioura (PepsiCo):  How to change your employee “DNA” to harness the power of AI (Hint: upskilling)</title>
      <itunes:episode>87</itunes:episode>
      <podcast:episode>87</podcast:episode>
      <itunes:title>Dr. Athina Kanioura (PepsiCo):  How to change your employee “DNA” to harness the power of AI (Hint: upskilling)</itunes:title>
      <itunes:episodeType>full</itunes:episodeType>
      <guid isPermaLink="false">ea0ec8ed-d510-4436-8bb6-209bb1599bbe</guid>
      <link>https://share.transistor.fm/s/600b5cd6</link>
      <description>
        <![CDATA[<p>In this episode, Dr. Athina Kanioura, Executive Vice President and Chief Strategy and Transformation Officer at PepsiCo, updates us on Pepsico’s pioneering steps in providing technology and opportunities to its workers and partners of all sizes, her wish list for AI and privacy regulation, and the measures she has instilled at Pepsico to establish accountability, transparency, and success in developing responsible AI practices.</p>]]>
      </description>
      <content:encoded>
        <![CDATA[<p>In this episode, Dr. Athina Kanioura, Executive Vice President and Chief Strategy and Transformation Officer at PepsiCo, updates us on Pepsico’s pioneering steps in providing technology and opportunities to its workers and partners of all sizes, her wish list for AI and privacy regulation, and the measures she has instilled at Pepsico to establish accountability, transparency, and success in developing responsible AI practices.</p>]]>
      </content:encoded>
      <pubDate>Wed, 21 Feb 2024 05:00:00 -0500</pubDate>
      <author>Miriam Vogel</author>
      <enclosure url="https://media.transistor.fm/600b5cd6/051da0eb.mp3" length="37906936" type="audio/mpeg"/>
      <itunes:author>Miriam Vogel</itunes:author>
      <itunes:duration>2368</itunes:duration>
      <itunes:summary>
        <![CDATA[<p>In this episode, Dr. Athina Kanioura, Executive Vice President and Chief Strategy and Transformation Officer at PepsiCo, updates us on Pepsico’s pioneering steps in providing technology and opportunities to its workers and partners of all sizes, her wish list for AI and privacy regulation, and the measures she has instilled at Pepsico to establish accountability, transparency, and success in developing responsible AI practices.</p>]]>
      </itunes:summary>
      <itunes:keywords>technology, AI Governance, AI, Tech, Artificial Intelligence, AI literacy, AI Policy</itunes:keywords>
      <itunes:explicit>No</itunes:explicit>
    </item>
    <item>
      <title>Andrew Ng: Should we fear an AI-driven existential crisis?</title>
      <itunes:episode>86</itunes:episode>
      <podcast:episode>86</podcast:episode>
      <itunes:title>Andrew Ng: Should we fear an AI-driven existential crisis?</itunes:title>
      <itunes:episodeType>full</itunes:episodeType>
      <guid isPermaLink="false">822edd8f-1bc7-49ed-b278-cee486f806d8</guid>
      <link>https://share.transistor.fm/s/e5d23b89</link>
      <description>
        <![CDATA[<p>Join us this week with AI-pioneer, Andrew Ng (Founder of DeepLearning.AI, Landing AI, Coursera, General Partner at the AI Fund, adjunct professor at Stanford University) as we discuss the likelihood of AI’s existential threat, the merits of regulation, the transformative power of generative AI, and the need for greater AI literacy.</p><p>―</p><p>Resources mentioned in this episode:<br><a href="https://aifund.ai/insights-written-statement-of-andrew-ng-before-the-u-s-senate-ai-insight-forum/">Written Statement of Andrew Ng Before the U.S. Senate AI Insight Forum</a></p>]]>
      </description>
      <content:encoded>
        <![CDATA[<p>Join us this week with AI-pioneer, Andrew Ng (Founder of DeepLearning.AI, Landing AI, Coursera, General Partner at the AI Fund, adjunct professor at Stanford University) as we discuss the likelihood of AI’s existential threat, the merits of regulation, the transformative power of generative AI, and the need for greater AI literacy.</p><p>―</p><p>Resources mentioned in this episode:<br><a href="https://aifund.ai/insights-written-statement-of-andrew-ng-before-the-u-s-senate-ai-insight-forum/">Written Statement of Andrew Ng Before the U.S. Senate AI Insight Forum</a></p>]]>
      </content:encoded>
      <pubDate>Wed, 24 Jan 2024 05:00:00 -0500</pubDate>
      <author>Miriam Vogel</author>
      <enclosure url="https://media.transistor.fm/e5d23b89/9bcd7056.mp3" length="42135482" type="audio/mpeg"/>
      <itunes:author>Miriam Vogel</itunes:author>
      <itunes:duration>2632</itunes:duration>
      <itunes:summary>
        <![CDATA[<p>Join us this week with AI-pioneer, Andrew Ng (Founder of DeepLearning.AI, Landing AI, Coursera, General Partner at the AI Fund, adjunct professor at Stanford University) as we discuss the likelihood of AI’s existential threat, the merits of regulation, the transformative power of generative AI, and the need for greater AI literacy.</p><p>―</p><p>Resources mentioned in this episode:<br><a href="https://aifund.ai/insights-written-statement-of-andrew-ng-before-the-u-s-senate-ai-insight-forum/">Written Statement of Andrew Ng Before the U.S. Senate AI Insight Forum</a></p>]]>
      </itunes:summary>
      <itunes:keywords>technology, AI Governance, AI, Tech, Artificial Intelligence, AI literacy, AI Policy</itunes:keywords>
      <itunes:explicit>No</itunes:explicit>
    </item>
    <item>
      <title>Kent Walker (Google &amp; Alphabet): How do we make AI safety the new norm? Google's approach to AI safety by design</title>
      <itunes:episode>85</itunes:episode>
      <podcast:episode>85</podcast:episode>
      <itunes:title>Kent Walker (Google &amp; Alphabet): How do we make AI safety the new norm? Google's approach to AI safety by design</itunes:title>
      <itunes:episodeType>full</itunes:episodeType>
      <guid isPermaLink="false">4bb4e4a1-f56d-4b98-a0ad-2338109c9a2c</guid>
      <link>https://share.transistor.fm/s/69236d8b</link>
      <description>
        <![CDATA[<p>Join us for our first episode of <em>In AI We Trust?</em> in 2024 featuring Kent Walker, President of Global Affairs and General Counsel at Google and Alphabet. In this episode, we examine the evolving global regulatory landscape, discuss the launch of Gemini – Google’s latest and most advanced AI model, analyze emerging trends in AI capabilities, and delve into the development of Google’s AI principles. Tune in to hear Kent share his thoughts on responsibility by design, the creation of AI safety norms, and how Google has worked to ensure safety in the midst of AI innovation.</p>]]>
      </description>
      <content:encoded>
        <![CDATA[<p>Join us for our first episode of <em>In AI We Trust?</em> in 2024 featuring Kent Walker, President of Global Affairs and General Counsel at Google and Alphabet. In this episode, we examine the evolving global regulatory landscape, discuss the launch of Gemini – Google’s latest and most advanced AI model, analyze emerging trends in AI capabilities, and delve into the development of Google’s AI principles. Tune in to hear Kent share his thoughts on responsibility by design, the creation of AI safety norms, and how Google has worked to ensure safety in the midst of AI innovation.</p>]]>
      </content:encoded>
      <pubDate>Wed, 10 Jan 2024 05:00:00 -0500</pubDate>
      <author>Miriam Vogel</author>
      <enclosure url="https://media.transistor.fm/69236d8b/af64e5fa.mp3" length="20408752" type="audio/mpeg"/>
      <itunes:author>Miriam Vogel</itunes:author>
      <itunes:duration>1274</itunes:duration>
      <itunes:summary>
        <![CDATA[<p>Join us for our first episode of <em>In AI We Trust?</em> in 2024 featuring Kent Walker, President of Global Affairs and General Counsel at Google and Alphabet. In this episode, we examine the evolving global regulatory landscape, discuss the launch of Gemini – Google’s latest and most advanced AI model, analyze emerging trends in AI capabilities, and delve into the development of Google’s AI principles. Tune in to hear Kent share his thoughts on responsibility by design, the creation of AI safety norms, and how Google has worked to ensure safety in the midst of AI innovation.</p>]]>
      </itunes:summary>
      <itunes:keywords>technology, AI Governance, AI, Tech, Artificial Intelligence, AI literacy, AI Policy</itunes:keywords>
      <itunes:explicit>No</itunes:explicit>
    </item>
    <item>
      <title>Raffi Krikorian (Emerson Collective): How to unleash AI “superpowers” for good?</title>
      <itunes:episode>84</itunes:episode>
      <podcast:episode>84</podcast:episode>
      <itunes:title>Raffi Krikorian (Emerson Collective): How to unleash AI “superpowers” for good?</itunes:title>
      <itunes:episodeType>full</itunes:episodeType>
      <guid isPermaLink="false">bd6eb652-04a3-41a1-a275-d25d0b32cd3a</guid>
      <link>https://share.transistor.fm/s/2fcf5bdc</link>
      <description>
        <![CDATA[<p>Join us for a thought-provoking episode as we delve into empowering society with AI “superpowers.” In this final episode of the year of <em>In AI We Trust?</em>, our guest, Raffi Krikorian, CTO of Emerson Collective, shares his insights into the broader landscape of AI using technology to amplify societal impact. Discover his vision on how AI will impact elections and democracy, his call for increased government and academic support for AI's success, how he advocates for widespread AI and tech education, redefines success metrics in AI, and more. Tune in to explore the transformative potential of AI when harnessed for positive change.</p><p>—</p><p>Resources Mentioned this Episode:</p><p><a href="https://www.emersoncollective.com/events/demo-day/demo-day-2023">Emerson Collective 2023 Demo Day</a></p>]]>
      </description>
      <content:encoded>
        <![CDATA[<p>Join us for a thought-provoking episode as we delve into empowering society with AI “superpowers.” In this final episode of the year of <em>In AI We Trust?</em>, our guest, Raffi Krikorian, CTO of Emerson Collective, shares his insights into the broader landscape of AI using technology to amplify societal impact. Discover his vision on how AI will impact elections and democracy, his call for increased government and academic support for AI's success, how he advocates for widespread AI and tech education, redefines success metrics in AI, and more. Tune in to explore the transformative potential of AI when harnessed for positive change.</p><p>—</p><p>Resources Mentioned this Episode:</p><p><a href="https://www.emersoncollective.com/events/demo-day/demo-day-2023">Emerson Collective 2023 Demo Day</a></p>]]>
      </content:encoded>
      <pubDate>Wed, 20 Dec 2023 05:00:00 -0500</pubDate>
      <author>Miriam Vogel</author>
      <enclosure url="https://media.transistor.fm/2fcf5bdc/30410af3.mp3" length="33998756" type="audio/mpeg"/>
      <itunes:author>Miriam Vogel</itunes:author>
      <itunes:duration>2124</itunes:duration>
      <itunes:summary>
        <![CDATA[<p>Join us for a thought-provoking episode as we delve into empowering society with AI “superpowers.” In this final episode of the year of <em>In AI We Trust?</em>, our guest, Raffi Krikorian, CTO of Emerson Collective, shares his insights into the broader landscape of AI using technology to amplify societal impact. Discover his vision on how AI will impact elections and democracy, his call for increased government and academic support for AI's success, how he advocates for widespread AI and tech education, redefines success metrics in AI, and more. Tune in to explore the transformative potential of AI when harnessed for positive change.</p><p>—</p><p>Resources Mentioned this Episode:</p><p><a href="https://www.emersoncollective.com/events/demo-day/demo-day-2023">Emerson Collective 2023 Demo Day</a></p>]]>
      </itunes:summary>
      <itunes:keywords>technology, AI Governance, AI, Tech, Artificial Intelligence, AI literacy, AI Policy</itunes:keywords>
      <itunes:explicit>No</itunes:explicit>
    </item>
    <item>
      <title>Decoding Big Tech's Impact on AI: Insights with Ross Andersen of The Atlantic</title>
      <itunes:episode>83</itunes:episode>
      <podcast:episode>83</podcast:episode>
      <itunes:title>Decoding Big Tech's Impact on AI: Insights with Ross Andersen of The Atlantic</itunes:title>
      <itunes:episodeType>full</itunes:episodeType>
      <guid isPermaLink="false">7a3793c4-62de-4d9e-8f31-0ef944e2318a</guid>
      <link>https://share.transistor.fm/s/52cb6837</link>
      <description>
        <![CDATA[<p>Join us this week as we delve into the pivotal role played by big tech and its CEOs in shaping AI development and policies. Ross Andersen, staff writer at The Atlantic, offers exclusive insights into the recent changes at OpenAI and discusses AI's historical significance, China's geopolitical influence, and the phenomenon of “foomscrolling.”</p>]]>
      </description>
      <content:encoded>
        <![CDATA[<p>Join us this week as we delve into the pivotal role played by big tech and its CEOs in shaping AI development and policies. Ross Andersen, staff writer at The Atlantic, offers exclusive insights into the recent changes at OpenAI and discusses AI's historical significance, China's geopolitical influence, and the phenomenon of “foomscrolling.”</p>]]>
      </content:encoded>
      <pubDate>Wed, 13 Dec 2023 05:00:00 -0500</pubDate>
      <author>Miriam Vogel</author>
      <enclosure url="https://media.transistor.fm/52cb6837/d179e4b7.mp3" length="34537005" type="audio/mpeg"/>
      <itunes:author>Miriam Vogel</itunes:author>
      <itunes:duration>2157</itunes:duration>
      <itunes:summary>
        <![CDATA[<p>Join us this week as we delve into the pivotal role played by big tech and its CEOs in shaping AI development and policies. Ross Andersen, staff writer at The Atlantic, offers exclusive insights into the recent changes at OpenAI and discusses AI's historical significance, China's geopolitical influence, and the phenomenon of “foomscrolling.”</p>]]>
      </itunes:summary>
      <itunes:keywords>technology, AI Governance, AI, Tech, Artificial Intelligence, AI literacy, AI Policy</itunes:keywords>
      <itunes:explicit>No</itunes:explicit>
    </item>
    <item>
      <title>Amanda Levendowski (Georgetown University): Can AI and copyright law coexist?</title>
      <itunes:episode>82</itunes:episode>
      <podcast:episode>82</podcast:episode>
      <itunes:title>Amanda Levendowski (Georgetown University): Can AI and copyright law coexist?</itunes:title>
      <itunes:episodeType>full</itunes:episodeType>
      <guid isPermaLink="false">f8765ce0-ce15-40a1-8e12-7c3d29175f73</guid>
      <link>https://share.transistor.fm/s/b5c57fe3</link>
      <description>
        <![CDATA[<p>Georgetown University Law Center Associate Professor Amanda Levendowski, and guest co-host Karyn Temple, Senior Executive Vice President and Global General Counsel for the Motion Picture Association (MPA) and EqualAI board member, join <em>In AI We Trust?</em> to explore the protections and limits of copyright law. Tune in to learn more about training AI systems and methods for evaluating the potential harms.</p>]]>
      </description>
      <content:encoded>
        <![CDATA[<p>Georgetown University Law Center Associate Professor Amanda Levendowski, and guest co-host Karyn Temple, Senior Executive Vice President and Global General Counsel for the Motion Picture Association (MPA) and EqualAI board member, join <em>In AI We Trust?</em> to explore the protections and limits of copyright law. Tune in to learn more about training AI systems and methods for evaluating the potential harms.</p>]]>
      </content:encoded>
      <pubDate>Wed, 22 Nov 2023 05:00:00 -0500</pubDate>
      <author>Miriam Vogel</author>
      <enclosure url="https://media.transistor.fm/b5c57fe3/308cd8a8.mp3" length="37302652" type="audio/mpeg"/>
      <itunes:author>Miriam Vogel</itunes:author>
      <itunes:duration>2330</itunes:duration>
      <itunes:summary>
        <![CDATA[<p>Georgetown University Law Center Associate Professor Amanda Levendowski, and guest co-host Karyn Temple, Senior Executive Vice President and Global General Counsel for the Motion Picture Association (MPA) and EqualAI board member, join <em>In AI We Trust?</em> to explore the protections and limits of copyright law. Tune in to learn more about training AI systems and methods for evaluating the potential harms.</p>]]>
      </itunes:summary>
      <itunes:keywords>technology, AI Governance, AI, Tech, Artificial Intelligence, AI literacy, AI Policy</itunes:keywords>
      <itunes:explicit>No</itunes:explicit>
    </item>
    <item>
      <title>Vijay Karunamurthy (Scale AI): How do companies safely unlock the value of AI? (Hint – through human touch!)</title>
      <itunes:episode>81</itunes:episode>
      <podcast:episode>81</podcast:episode>
      <itunes:title>Vijay Karunamurthy (Scale AI): How do companies safely unlock the value of AI? (Hint – through human touch!)</itunes:title>
      <itunes:episodeType>full</itunes:episodeType>
      <guid isPermaLink="false">88dbc3b9-7636-4fcc-aad5-26e102b93d0e</guid>
      <link>https://share.transistor.fm/s/23acd79e</link>
      <description>
        <![CDATA[<p>Vijay Karunamurthy, Field Chief Technology Officer at Scale AI, joins this week’s episode of <em>In AI We Trust?</em> to discuss how companies can responsibly harness the benefits of AI, the necessary role humans play in that process, and how a diversity of expertise is key to developing functional guardrails. Tune in to hear more on evaluating AI systems at DEFCON 31, the potential to expand our horizons through public-private partnerships, and the Biden-Harris Administration Voluntary AI Commitments.</p>]]>
      </description>
      <content:encoded>
        <![CDATA[<p>Vijay Karunamurthy, Field Chief Technology Officer at Scale AI, joins this week’s episode of <em>In AI We Trust?</em> to discuss how companies can responsibly harness the benefits of AI, the necessary role humans play in that process, and how a diversity of expertise is key to developing functional guardrails. Tune in to hear more on evaluating AI systems at DEFCON 31, the potential to expand our horizons through public-private partnerships, and the Biden-Harris Administration Voluntary AI Commitments.</p>]]>
      </content:encoded>
      <pubDate>Wed, 01 Nov 2023 19:11:56 -0400</pubDate>
      <author>Miriam Vogel</author>
      <enclosure url="https://media.transistor.fm/23acd79e/80f76a87.mp3" length="37061199" type="audio/mpeg"/>
      <itunes:author>Miriam Vogel</itunes:author>
      <itunes:duration>2315</itunes:duration>
      <itunes:summary>
        <![CDATA[<p>Vijay Karunamurthy, Field Chief Technology Officer at Scale AI, joins this week’s episode of <em>In AI We Trust?</em> to discuss how companies can responsibly harness the benefits of AI, the necessary role humans play in that process, and how a diversity of expertise is key to developing functional guardrails. Tune in to hear more on evaluating AI systems at DEFCON 31, the potential to expand our horizons through public-private partnerships, and the Biden-Harris Administration Voluntary AI Commitments.</p>]]>
      </itunes:summary>
      <itunes:keywords>technology, AI Governance, AI, Tech, Artificial Intelligence, AI literacy, AI Policy</itunes:keywords>
      <itunes:explicit>No</itunes:explicit>
    </item>
    <item>
      <title>Paul Rennie (British Embassy): Why do we need the UK AI Safety Summit (next week)?</title>
      <itunes:episode>80</itunes:episode>
      <podcast:episode>80</podcast:episode>
      <itunes:title>Paul Rennie (British Embassy): Why do we need the UK AI Safety Summit (next week)?</itunes:title>
      <itunes:episodeType>full</itunes:episodeType>
      <guid isPermaLink="false">475da48c-d035-44a2-88f5-6c03804fd296</guid>
      <link>https://share.transistor.fm/s/0716258e</link>
      <description>
        <![CDATA[<p>Paul Rennie, the Head of the Global Economy Group at the British Embassy in Washington D.C., joins this week’s episode of <em>In AI We Trust?</em> to discuss the upcoming U.K. AI Safety Summit, the U.K.’s approach to AI regulation, and the international regulatory landscape of AI. Tune in to learn more about who is participating in the upcoming Summit, what it means to be a responsible AI actor today, and how AI can be used to promote global good.</p>]]>
      </description>
      <content:encoded>
        <![CDATA[<p>Paul Rennie, the Head of the Global Economy Group at the British Embassy in Washington D.C., joins this week’s episode of <em>In AI We Trust?</em> to discuss the upcoming U.K. AI Safety Summit, the U.K.’s approach to AI regulation, and the international regulatory landscape of AI. Tune in to learn more about who is participating in the upcoming Summit, what it means to be a responsible AI actor today, and how AI can be used to promote global good.</p>]]>
      </content:encoded>
      <pubDate>Wed, 25 Oct 2023 05:00:00 -0400</pubDate>
      <author>Miriam Vogel</author>
      <enclosure url="https://media.transistor.fm/0716258e/5e651b5c.mp3" length="37461468" type="audio/mpeg"/>
      <itunes:author>Miriam Vogel</itunes:author>
      <itunes:duration>2340</itunes:duration>
      <itunes:summary>
        <![CDATA[<p>Paul Rennie, the Head of the Global Economy Group at the British Embassy in Washington D.C., joins this week’s episode of <em>In AI We Trust?</em> to discuss the upcoming U.K. AI Safety Summit, the U.K.’s approach to AI regulation, and the international regulatory landscape of AI. Tune in to learn more about who is participating in the upcoming Summit, what it means to be a responsible AI actor today, and how AI can be used to promote global good.</p>]]>
      </itunes:summary>
      <itunes:keywords>technology, AI Governance, AI, Tech, Artificial Intelligence, AI literacy, AI Policy</itunes:keywords>
      <itunes:explicit>No</itunes:explicit>
    </item>
    <item>
      <title>Victoria Espinel (BSA), Reggie Townsend (SAS), and Dawn Bloxwich (Google Deepmind): How can companies responsibly integrate AI into their businesses? (Part 2)</title>
      <itunes:episode>79</itunes:episode>
      <podcast:episode>79</podcast:episode>
      <itunes:title>Victoria Espinel (BSA), Reggie Townsend (SAS), and Dawn Bloxwich (Google Deepmind): How can companies responsibly integrate AI into their businesses? (Part 2)</itunes:title>
      <itunes:episodeType>full</itunes:episodeType>
      <guid isPermaLink="false">124cc312-3160-4b9f-8a7a-2cdf6c990f1c</guid>
      <link>https://share.transistor.fm/s/54f2a6ae</link>
      <description>
        <![CDATA[<p>In part two of our special episode of <em>In AI We Trust?</em>, EqualAI advisors Victoria Espinel and Reggie Townsend discuss how they got into the field of AI, their involvement in the EqualAI Badge Program and their experiences guiding its participants, and, along with Dawn Bloxwich, discuss how companies can benefit from their co-authored white paper:<em> </em><a href="https://aia-jqffmyq-lef7oe7mnkdla.us.platform.sh/assets/docs/230928_EQUAL_AI_Whitepaper_V4.3.pdf"><em>An Insider’s Guide to Designing and Operationalizing a Responsible AI Governance Framework</em></a><em>.</em></p>]]>
      </description>
      <content:encoded>
        <![CDATA[<p>In part two of our special episode of <em>In AI We Trust?</em>, EqualAI advisors Victoria Espinel and Reggie Townsend discuss how they got into the field of AI, their involvement in the EqualAI Badge Program and their experiences guiding its participants, and, along with Dawn Bloxwich, discuss how companies can benefit from their co-authored white paper:<em> </em><a href="https://aia-jqffmyq-lef7oe7mnkdla.us.platform.sh/assets/docs/230928_EQUAL_AI_Whitepaper_V4.3.pdf"><em>An Insider’s Guide to Designing and Operationalizing a Responsible AI Governance Framework</em></a><em>.</em></p>]]>
      </content:encoded>
      <pubDate>Wed, 11 Oct 2023 05:00:00 -0400</pubDate>
      <author>Miriam Vogel</author>
      <enclosure url="https://media.transistor.fm/54f2a6ae/15539d7f.mp3" length="37751190" type="audio/mpeg"/>
      <itunes:author>Miriam Vogel</itunes:author>
      <itunes:duration>2358</itunes:duration>
      <itunes:summary>
        <![CDATA[<p>In part two of our special episode of <em>In AI We Trust?</em>, EqualAI advisors Victoria Espinel and Reggie Townsend discuss how they got into the field of AI, their involvement in the EqualAI Badge Program and their experiences guiding its participants, and, along with Dawn Bloxwich, discuss how companies can benefit from their co-authored white paper:<em> </em><a href="https://aia-jqffmyq-lef7oe7mnkdla.us.platform.sh/assets/docs/230928_EQUAL_AI_Whitepaper_V4.3.pdf"><em>An Insider’s Guide to Designing and Operationalizing a Responsible AI Governance Framework</em></a><em>.</em></p>]]>
      </itunes:summary>
      <itunes:keywords>technology, AI Governance, AI, Tech, Artificial Intelligence, AI literacy, AI Policy</itunes:keywords>
      <itunes:explicit>No</itunes:explicit>
    </item>
    <item>
      <title>Xuning (Mike) Tang (Verizon), Diya Wynn (AWS), and Catherine Goetz (LivePerson): How can companies responsibly integrate AI into their businesses? (Part 1)</title>
      <itunes:episode>78</itunes:episode>
      <podcast:episode>78</podcast:episode>
      <itunes:title>Xuning (Mike) Tang (Verizon), Diya Wynn (AWS), and Catherine Goetz (LivePerson): How can companies responsibly integrate AI into their businesses? (Part 1)</itunes:title>
      <itunes:episodeType>full</itunes:episodeType>
      <guid isPermaLink="false">bdbbb19c-be72-42ed-8ffa-43e5ad931c50</guid>
      <link>https://share.transistor.fm/s/a6237fe3</link>
      <description>
        <![CDATA[<p>In this week’s special episode of <em>In AI We Trust?</em>, we interview three of our EqualAI Badge Program alumni—Xuning (Mike) Tang (Verizon), Diya Wynn (AWS), and Catherine Goetz (LivePerson)—to discuss their journeys in the responsible AI field, share their highlights from the EqualAI Badge Program and AI Summit, and underscore the main takeaways from our co-authored white paper: <em>An Insider’s Guide to Designing and Operationalizing a Responsible AI Governance Framework.</em></p>]]>
      </description>
      <content:encoded>
        <![CDATA[<p>In this week’s special episode of <em>In AI We Trust?</em>, we interview three of our EqualAI Badge Program alumni—Xuning (Mike) Tang (Verizon), Diya Wynn (AWS), and Catherine Goetz (LivePerson)—to discuss their journeys in the responsible AI field, share their highlights from the EqualAI Badge Program and AI Summit, and underscore the main takeaways from our co-authored white paper: <em>An Insider’s Guide to Designing and Operationalizing a Responsible AI Governance Framework.</em></p>]]>
      </content:encoded>
      <pubDate>Tue, 03 Oct 2023 17:30:00 -0400</pubDate>
      <author>Miriam Vogel</author>
      <enclosure url="https://media.transistor.fm/a6237fe3/ceebb794.mp3" length="37064897" type="audio/mpeg"/>
      <itunes:author>Miriam Vogel</itunes:author>
      <itunes:duration>2315</itunes:duration>
      <itunes:summary>
        <![CDATA[<p>In this week’s special episode of <em>In AI We Trust?</em>, we interview three of our EqualAI Badge Program alumni—Xuning (Mike) Tang (Verizon), Diya Wynn (AWS), and Catherine Goetz (LivePerson)—to discuss their journeys in the responsible AI field, share their highlights from the EqualAI Badge Program and AI Summit, and underscore the main takeaways from our co-authored white paper: <em>An Insider’s Guide to Designing and Operationalizing a Responsible AI Governance Framework.</em></p>]]>
      </itunes:summary>
      <itunes:keywords>technology, AI Governance, AI, Tech, Artificial Intelligence, AI literacy, AI Policy</itunes:keywords>
      <itunes:explicit>No</itunes:explicit>
    </item>
    <item>
      <title>Rep. Ted Lieu (D-CA): Can Congress regulate AI? </title>
      <itunes:episode>77</itunes:episode>
      <podcast:episode>77</podcast:episode>
      <itunes:title>Rep. Ted Lieu (D-CA): Can Congress regulate AI? </itunes:title>
      <itunes:episodeType>full</itunes:episodeType>
      <guid isPermaLink="false">5617a71d-93ba-4d43-a065-74ac2eadf7cf</guid>
      <link>https://share.transistor.fm/s/33e4f669</link>
      <description>
        <![CDATA[<p>Representative Ted Lieu (D-CA) joins this week’s episode of <em>In AI We Trust?</em> to discuss how Congress should approach AI legislation, the impact of generative AI, and U.S. AI efforts on the global stage. Tune in to learn more about Representative Lieu’s computer science focused approach to AI policy and more.</p>]]>
      </description>
      <content:encoded>
        <![CDATA[<p>Representative Ted Lieu (D-CA) joins this week’s episode of <em>In AI We Trust?</em> to discuss how Congress should approach AI legislation, the impact of generative AI, and U.S. AI efforts on the global stage. Tune in to learn more about Representative Lieu’s computer science focused approach to AI policy and more.</p>]]>
      </content:encoded>
      <pubDate>Wed, 27 Sep 2023 05:00:00 -0400</pubDate>
      <author>Miriam Vogel</author>
      <enclosure url="https://media.transistor.fm/33e4f669/03df516e.mp3" length="27684125" type="audio/mpeg"/>
      <itunes:author>Miriam Vogel</itunes:author>
      <itunes:duration>1729</itunes:duration>
      <itunes:summary>
        <![CDATA[<p>Representative Ted Lieu (D-CA) joins this week’s episode of <em>In AI We Trust?</em> to discuss how Congress should approach AI legislation, the impact of generative AI, and U.S. AI efforts on the global stage. Tune in to learn more about Representative Lieu’s computer science focused approach to AI policy and more.</p>]]>
      </itunes:summary>
      <itunes:keywords>technology, AI Governance, AI, Tech, Artificial Intelligence, AI literacy, AI Policy</itunes:keywords>
      <itunes:explicit>No</itunes:explicit>
    </item>
    <item>
      <title>Secretary Michael Chertoff and Lucy Thomson (ABA): How is AI reshaping the legal landscape?</title>
      <itunes:episode>76</itunes:episode>
      <podcast:episode>76</podcast:episode>
      <itunes:title>Secretary Michael Chertoff and Lucy Thomson (ABA): How is AI reshaping the legal landscape?</itunes:title>
      <itunes:episodeType>full</itunes:episodeType>
      <guid isPermaLink="false">92baabf0-09e0-4bbf-b937-ce7246d1416c</guid>
      <link>https://share.transistor.fm/s/4dff2bef</link>
      <description>
        <![CDATA[<p>Tune into this week’s episode of <em>In AI We Trust? </em>with former United States Secretary of Homeland Security, Michael Chertoff, and Lucy Thomson (American Bar Association), to learn the ways in which AI is changing the legal landscape, how the ABA is tackling this issue (spoiler alert: we applaud the launch of the new AI TF), and to learn the Secretary’s “Three D's” of AI governance.</p><p><br></p>]]>
      </description>
      <content:encoded>
        <![CDATA[<p>Tune into this week’s episode of <em>In AI We Trust? </em>with former United States Secretary of Homeland Security, Michael Chertoff, and Lucy Thomson (American Bar Association), to learn the ways in which AI is changing the legal landscape, how the ABA is tackling this issue (spoiler alert: we applaud the launch of the new AI TF), and to learn the Secretary’s “Three D's” of AI governance.</p><p><br></p>]]>
      </content:encoded>
      <pubDate>Wed, 30 Aug 2023 05:00:00 -0400</pubDate>
      <author>Miriam Vogel</author>
      <enclosure url="https://media.transistor.fm/4dff2bef/ab13e7fc.mp3" length="48700818" type="audio/mpeg"/>
      <itunes:author>Miriam Vogel</itunes:author>
      <itunes:duration>3042</itunes:duration>
      <itunes:summary>
        <![CDATA[<p>Tune into this week’s episode of <em>In AI We Trust? </em>with former United States Secretary of Homeland Security, Michael Chertoff, and Lucy Thomson (American Bar Association), to learn the ways in which AI is changing the legal landscape, how the ABA is tackling this issue (spoiler alert: we applaud the launch of the new AI TF), and to learn the Secretary’s “Three D's” of AI governance.</p><p><br></p>]]>
      </itunes:summary>
      <itunes:keywords>technology, AI Governance, AI, Tech, Artificial Intelligence, AI literacy, AI Policy</itunes:keywords>
      <itunes:explicit>No</itunes:explicit>
    </item>
    <item>
      <title>Sarah Hammer (Wharton School) and Dr. Philipp Hacker (European University Viadrina): Can AI accelerate the UN Sustainable Development Goals (SDGs)?</title>
      <itunes:episode>75</itunes:episode>
      <podcast:episode>75</podcast:episode>
      <itunes:title>Sarah Hammer (Wharton School) and Dr. Philipp Hacker (European University Viadrina): Can AI accelerate the UN Sustainable Development Goals (SDGs)?</itunes:title>
      <itunes:episodeType>full</itunes:episodeType>
      <guid isPermaLink="false">2851db1d-bcba-4e88-9972-83302731d1d5</guid>
      <link>https://share.transistor.fm/s/1a2bc152</link>
      <description>
        <![CDATA[<p>Professor Sarah Hammer, Executive Director at the Wharton School of the University of Pennsylvania and leader of the Wharton Cypher Accelerator, and Dr. Philipp Hacker, Chair for Law and Ethics of the Digital Society at the European New School of Digital Studies at European University, join this week on <em>In AI We Trust?</em> to debrief their recent #AIforGood Conference. Listen to the discussion for insights on how financial regulation, sustainability in AI, content moderation, and other opportunities for international collaboration around AI will help advance UN SDG goals.</p><p>—</p><p>Resources Mentioned This Episode:<br><a href="https://aiforgood.itu.int/">AI for Good Global Summit</a></p><p><a href="https://www.youtube.com/watch?v=_FcSt3aX9d0">AI for Good Global Summit 2023: Input Statement by Professor Philipp Hacker</a></p><p><a href="https://arxiv.org/abs/2302.02337">Regulating ChatGPT and other Large Generative AI Models</a></p><p><a href="https://arxiv.org/abs/2211.13960">The European AI Liability Directives – Critique of a Half-Hearted Approach and Lessons for the Future</a></p><p><a href="https://papers.ssrn.com/sol3/papers.cfm?abstract_id=3164973">Teaching Fairness to Artificial Intelligence: Existing and Novel Strategies Against Algorithmic Discrimination Under EU Law</a></p><p><a href="https://arxiv.org/abs/2306.00292">Sustainable AI Regulation</a></p><p><a href="https://aiforgood.itu.int/event/legal-and-technical-challenges-of-large-generative-ai-models/">Legal and technical challenges of large generative AI models</a></p><p><a href="https://arxiv.org/abs/2302.02337">Regulating ChatGPT and other Large Generative AI Models</a></p>]]>
      </description>
      <content:encoded>
        <![CDATA[<p>Professor Sarah Hammer, Executive Director at the Wharton School of the University of Pennsylvania and leader of the Wharton Cypher Accelerator, and Dr. Philipp Hacker, Chair for Law and Ethics of the Digital Society at the European New School of Digital Studies at European University, join this week on <em>In AI We Trust?</em> to debrief their recent #AIforGood Conference. Listen to the discussion for insights on how financial regulation, sustainability in AI, content moderation, and other opportunities for international collaboration around AI will help advance UN SDG goals.</p><p>—</p><p>Resources Mentioned This Episode:<br><a href="https://aiforgood.itu.int/">AI for Good Global Summit</a></p><p><a href="https://www.youtube.com/watch?v=_FcSt3aX9d0">AI for Good Global Summit 2023: Input Statement by Professor Philipp Hacker</a></p><p><a href="https://arxiv.org/abs/2302.02337">Regulating ChatGPT and other Large Generative AI Models</a></p><p><a href="https://arxiv.org/abs/2211.13960">The European AI Liability Directives – Critique of a Half-Hearted Approach and Lessons for the Future</a></p><p><a href="https://papers.ssrn.com/sol3/papers.cfm?abstract_id=3164973">Teaching Fairness to Artificial Intelligence: Existing and Novel Strategies Against Algorithmic Discrimination Under EU Law</a></p><p><a href="https://arxiv.org/abs/2306.00292">Sustainable AI Regulation</a></p><p><a href="https://aiforgood.itu.int/event/legal-and-technical-challenges-of-large-generative-ai-models/">Legal and technical challenges of large generative AI models</a></p><p><a href="https://arxiv.org/abs/2302.02337">Regulating ChatGPT and other Large Generative AI Models</a></p>]]>
      </content:encoded>
      <pubDate>Wed, 26 Jul 2023 05:00:00 -0400</pubDate>
      <author>Miriam Vogel</author>
      <enclosure url="https://media.transistor.fm/1a2bc152/5bb40298.mp3" length="57012828" type="audio/mpeg"/>
      <itunes:author>Miriam Vogel</itunes:author>
      <itunes:duration>3562</itunes:duration>
      <itunes:summary>
        <![CDATA[<p>Professor Sarah Hammer, Executive Director at the Wharton School of the University of Pennsylvania and leader of the Wharton Cypher Accelerator, and Dr. Philipp Hacker, Chair for Law and Ethics of the Digital Society at the European New School of Digital Studies at European University, join this week on <em>In AI We Trust?</em> to debrief their recent #AIforGood Conference. Listen to the discussion for insights on how financial regulation, sustainability in AI, content moderation, and other opportunities for international collaboration around AI will help advance UN SDG goals.</p><p>—</p><p>Resources Mentioned This Episode:<br><a href="https://aiforgood.itu.int/">AI for Good Global Summit</a></p><p><a href="https://www.youtube.com/watch?v=_FcSt3aX9d0">AI for Good Global Summit 2023: Input Statement by Professor Philipp Hacker</a></p><p><a href="https://arxiv.org/abs/2302.02337">Regulating ChatGPT and other Large Generative AI Models</a></p><p><a href="https://arxiv.org/abs/2211.13960">The European AI Liability Directives – Critique of a Half-Hearted Approach and Lessons for the Future</a></p><p><a href="https://papers.ssrn.com/sol3/papers.cfm?abstract_id=3164973">Teaching Fairness to Artificial Intelligence: Existing and Novel Strategies Against Algorithmic Discrimination Under EU Law</a></p><p><a href="https://arxiv.org/abs/2306.00292">Sustainable AI Regulation</a></p><p><a href="https://aiforgood.itu.int/event/legal-and-technical-challenges-of-large-generative-ai-models/">Legal and technical challenges of large generative AI models</a></p><p><a href="https://arxiv.org/abs/2302.02337">Regulating ChatGPT and other Large Generative AI Models</a></p>]]>
      </itunes:summary>
      <itunes:keywords>technology, AI Governance, AI, Tech, Artificial Intelligence, AI literacy, AI Policy</itunes:keywords>
      <itunes:explicit>No</itunes:explicit>
    </item>
    <item>
      <title>Chair Charlotte Burrows (EEOC): Is your AI system violating civil rights laws?</title>
      <itunes:episode>74</itunes:episode>
      <podcast:episode>74</podcast:episode>
      <itunes:title>Chair Charlotte Burrows (EEOC): Is your AI system violating civil rights laws?</itunes:title>
      <itunes:episodeType>full</itunes:episodeType>
      <guid isPermaLink="false">f83735e0-ea30-4a9a-a58a-987f68d24a1b</guid>
      <link>https://share.transistor.fm/s/9701df6d</link>
      <description>
        <![CDATA[<p>In this week’s episode, we are joined by Chair of the U.S. Equal Employment Opportunity Commission (EEOC) Charlotte Burrows, who highlights the EEOC’s work to address AI proliferation in the employment sphere. She discusses the need to increase education of the public on how AI is being used, EEOC guidance on key civil rights bills such as the Americans with Disabilities Act (ADA) and Title VII of the Civil Rights Act of 1964 (Title VII), as well as key points employers should be aware of when deploying AI.</p>]]>
      </description>
      <content:encoded>
        <![CDATA[<p>In this week’s episode, we are joined by Chair of the U.S. Equal Employment Opportunity Commission (EEOC) Charlotte Burrows, who highlights the EEOC’s work to address AI proliferation in the employment sphere. She discusses the need to increase education of the public on how AI is being used, EEOC guidance on key civil rights bills such as the Americans with Disabilities Act (ADA) and Title VII of the Civil Rights Act of 1964 (Title VII), as well as key points employers should be aware of when deploying AI.</p>]]>
      </content:encoded>
      <pubDate>Wed, 12 Jul 2023 05:00:00 -0400</pubDate>
      <author>Miriam Vogel</author>
      <enclosure url="https://media.transistor.fm/9701df6d/55a54aa1.mp3" length="43854986" type="audio/mpeg"/>
      <itunes:author>Miriam Vogel</itunes:author>
      <itunes:duration>2740</itunes:duration>
      <itunes:summary>
        <![CDATA[<p>In this week’s episode, we are joined by Chair of the U.S. Equal Employment Opportunity Commission (EEOC) Charlotte Burrows, who highlights the EEOC’s work to address AI proliferation in the employment sphere. She discusses the need to increase education of the public on how AI is being used, EEOC guidance on key civil rights bills such as the Americans with Disabilities Act (ADA) and Title VII of the Civil Rights Act of 1964 (Title VII), as well as key points employers should be aware of when deploying AI.</p>]]>
      </itunes:summary>
      <itunes:keywords>technology, AI Governance, AI, Tech, Artificial Intelligence, AI literacy, AI Policy</itunes:keywords>
      <itunes:explicit>No</itunes:explicit>
    </item>
    <item>
      <title>Kevin McKee (DeepMind): How does AI influence the core of being human?</title>
      <itunes:episode>73</itunes:episode>
      <podcast:episode>73</podcast:episode>
      <itunes:title>Kevin McKee (DeepMind): How does AI influence the core of being human?</itunes:title>
      <itunes:episodeType>full</itunes:episodeType>
      <guid isPermaLink="false">8e17d13c-7df7-4677-aabc-3c752c0d1c9b</guid>
      <link>https://share.transistor.fm/s/cbb46a40</link>
      <description>
        <![CDATA[<p>Tune in to this week’s episode of <em>In AI We Trust?</em>, where Kevin McKee, Senior Research Scientist at Google DeepMind, discusses issues of AI fairness, AI’s impact on the LGBT+ community, and the balance between developing AI that humans can trust and the anthropomorphization of technology. Kevin leads research projects focused on machine learning, social psychology, and sociotechnical systems and has worked on algorithmic development and evaluation, environment design, and data analysis.</p><p>—</p><p>Resources Mentioned this Episode:<br><a href="https://www.technologyreview.com/2023/06/28/1075683/humans-may-be-more-likely-to-believe-disinformation-generated-by-ai/?truid=&amp;utm_source=the_download&amp;utm_medium=email&amp;utm_campaign=the_download.unpaid.engagement&amp;utm_term=&amp;utm_content=06-29-2023&amp;mc_cid=28b5d3abb6&amp;mc_eid=a8112f809a">Humans may be more likely to believe disinformation generated by AI</a></p><p><a href="https://www.thetimes.co.uk/article/countries-must-act-now-over-ethics-of-artificial-intelligence-qmrmfdhnn">Countries Must Act Now Over Ethics of Artificial Intelligence</a></p><p><a href="https://www.axios.com/2023/06/28/online-hate-harassment-rise-adl">Online hate and harassment continues to rise</a></p>]]>
      </description>
      <content:encoded>
        <![CDATA[<p>Tune in to this week’s episode of <em>In AI We Trust?</em>, where Kevin McKee, Senior Research Scientist at Google DeepMind, discusses issues of AI fairness, AI’s impact on the LGBT+ community, and the balance between developing AI that humans can trust and the anthropomorphization of technology. Kevin leads research projects focused on machine learning, social psychology, and sociotechnical systems and has worked on algorithmic development and evaluation, environment design, and data analysis.</p><p>—</p><p>Resources Mentioned this Episode:<br><a href="https://www.technologyreview.com/2023/06/28/1075683/humans-may-be-more-likely-to-believe-disinformation-generated-by-ai/?truid=&amp;utm_source=the_download&amp;utm_medium=email&amp;utm_campaign=the_download.unpaid.engagement&amp;utm_term=&amp;utm_content=06-29-2023&amp;mc_cid=28b5d3abb6&amp;mc_eid=a8112f809a">Humans may be more likely to believe disinformation generated by AI</a></p><p><a href="https://www.thetimes.co.uk/article/countries-must-act-now-over-ethics-of-artificial-intelligence-qmrmfdhnn">Countries Must Act Now Over Ethics of Artificial Intelligence</a></p><p><a href="https://www.axios.com/2023/06/28/online-hate-harassment-rise-adl">Online hate and harassment continues to rise</a></p>]]>
      </content:encoded>
      <pubDate>Wed, 05 Jul 2023 13:10:10 -0400</pubDate>
      <author>Miriam Vogel</author>
      <enclosure url="https://media.transistor.fm/cbb46a40/92aaca95.mp3" length="34539085" type="audio/mpeg"/>
      <itunes:author>Miriam Vogel</itunes:author>
      <itunes:duration>2157</itunes:duration>
      <itunes:summary>
        <![CDATA[<p>Tune in to this week’s episode of <em>In AI We Trust?</em>, where Kevin McKee, Senior Research Scientist at Google DeepMind, discusses issues of AI fairness, AI’s impact on the LGBT+ community, and the balance between developing AI that humans can trust and the anthropomorphization of technology. Kevin leads research projects focused on machine learning, social psychology, and sociotechnical systems and has worked on algorithmic development and evaluation, environment design, and data analysis.</p><p>—</p><p>Resources Mentioned this Episode:<br><a href="https://www.technologyreview.com/2023/06/28/1075683/humans-may-be-more-likely-to-believe-disinformation-generated-by-ai/?truid=&amp;utm_source=the_download&amp;utm_medium=email&amp;utm_campaign=the_download.unpaid.engagement&amp;utm_term=&amp;utm_content=06-29-2023&amp;mc_cid=28b5d3abb6&amp;mc_eid=a8112f809a">Humans may be more likely to believe disinformation generated by AI</a></p><p><a href="https://www.thetimes.co.uk/article/countries-must-act-now-over-ethics-of-artificial-intelligence-qmrmfdhnn">Countries Must Act Now Over Ethics of Artificial Intelligence</a></p><p><a href="https://www.axios.com/2023/06/28/online-hate-harassment-rise-adl">Online hate and harassment continues to rise</a></p>]]>
      </itunes:summary>
      <itunes:keywords>technology, AI Governance, AI, Tech, Artificial Intelligence, AI literacy, AI Policy</itunes:keywords>
      <itunes:explicit>No</itunes:explicit>
    </item>
    <item>
      <title>Chris Wood (LGBT Tech): How can we ensure our LGBT+ voices are heard through our data?</title>
      <itunes:episode>72</itunes:episode>
      <podcast:episode>72</podcast:episode>
      <itunes:title>Chris Wood (LGBT Tech): How can we ensure our LGBT+ voices are heard through our data?</itunes:title>
      <itunes:episodeType>full</itunes:episodeType>
      <guid isPermaLink="false">0cda0d92-bfb0-44c2-be42-c0b8ebac8fa8</guid>
      <link>https://share.transistor.fm/s/483ad403</link>
      <description>
        <![CDATA[<p>This week on <em>In AI we Trust?</em> Executive Director of LGBT Tech, Chris Wood, joins Miriam Vogel and guest-co-host Kathy Baxter for a special episode in celebration of Pride Month. Join this week’s conversation on the duality of technology for the LGBT community – how it can be an impactful medium to foster connection in the LGBT+ community or a harmful tool leveraged against the same individuals – the significance of diversity in tech, the complexity of representation in our datasets, as well as his important research and other initiatives that range from broadband access in rural communities to building an AI of their own. </p><p><br>—</p><p>Resources Mentioned this Episode:<br><a href="https://www.lgbttech.org/mission">LGBT Tech Website</a><br><a href="https://www.lgbttech.org/_files/ugd/699ad7_b0219ea9c8804c05a03d95ca7f911f78.pdf">Vision For Inclusion: An LGBT Broadband Future</a><br><a href="https://www.lgbttech.org/programs">LGBT Tech Programs</a></p>]]>
      </description>
      <content:encoded>
        <![CDATA[<p>This week on <em>In AI we Trust?</em> Executive Director of LGBT Tech, Chris Wood, joins Miriam Vogel and guest-co-host Kathy Baxter for a special episode in celebration of Pride Month. Join this week’s conversation on the duality of technology for the LGBT community – how it can be an impactful medium to foster connection in the LGBT+ community or a harmful tool leveraged against the same individuals – the significance of diversity in tech, the complexity of representation in our datasets, as well as his important research and other initiatives that range from broadband access in rural communities to building an AI of their own. </p><p><br>—</p><p>Resources Mentioned this Episode:<br><a href="https://www.lgbttech.org/mission">LGBT Tech Website</a><br><a href="https://www.lgbttech.org/_files/ugd/699ad7_b0219ea9c8804c05a03d95ca7f911f78.pdf">Vision For Inclusion: An LGBT Broadband Future</a><br><a href="https://www.lgbttech.org/programs">LGBT Tech Programs</a></p>]]>
      </content:encoded>
      <pubDate>Wed, 28 Jun 2023 09:05:24 -0400</pubDate>
      <author>Miriam Vogel</author>
      <enclosure url="https://media.transistor.fm/483ad403/9d1a0396.mp3" length="43215518" type="audio/mpeg"/>
      <itunes:author>Miriam Vogel</itunes:author>
      <itunes:duration>2700</itunes:duration>
      <itunes:summary>
        <![CDATA[<p>This week on <em>In AI we Trust?</em> Executive Director of LGBT Tech, Chris Wood, joins Miriam Vogel and guest-co-host Kathy Baxter for a special episode in celebration of Pride Month. Join this week’s conversation on the duality of technology for the LGBT community – how it can be an impactful medium to foster connection in the LGBT+ community or a harmful tool leveraged against the same individuals – the significance of diversity in tech, the complexity of representation in our datasets, as well as his important research and other initiatives that range from broadband access in rural communities to building an AI of their own. </p><p><br>—</p><p>Resources Mentioned this Episode:<br><a href="https://www.lgbttech.org/mission">LGBT Tech Website</a><br><a href="https://www.lgbttech.org/_files/ugd/699ad7_b0219ea9c8804c05a03d95ca7f911f78.pdf">Vision For Inclusion: An LGBT Broadband Future</a><br><a href="https://www.lgbttech.org/programs">LGBT Tech Programs</a></p>]]>
      </itunes:summary>
      <itunes:keywords>technology, AI Governance, AI, Tech, Artificial Intelligence, AI literacy, AI Policy</itunes:keywords>
      <itunes:explicit>No</itunes:explicit>
    </item>
    <item>
      <title>Gilman Louie (America’s Frontier Fund, CEO of In-Q-Tel, NSCAI Comm’r): How will we respond to this ‘Sputnik’ moment?</title>
      <itunes:episode>71</itunes:episode>
      <podcast:episode>71</podcast:episode>
      <itunes:title>Gilman Louie (America’s Frontier Fund, CEO of In-Q-Tel, NSCAI Comm’r): How will we respond to this ‘Sputnik’ moment?</itunes:title>
      <itunes:episodeType>full</itunes:episodeType>
      <guid isPermaLink="false">dee749e2-cd8d-4281-891a-dec4c05e1832</guid>
      <link>https://share.transistor.fm/s/a608ce69</link>
      <description>
        <![CDATA[<p>Gilman Louie is CEO and co-founder of America’s Frontier Fund, CEO of In-Q-Tel, and an NSCAI Commissioner. Tune into this week’s episode of <em>In AI We Trust</em>, where Gilman shares his thoughts on the government's role in regulating, funding, and convening key stakeholders to promote responsible AI. Gilman invokes similar moments of technological innovation in our history to contextualize the opportunity in the U.S. at this moment to set the standards in the AI race; and considers the challenges that derive from our “click economy”. Hear these thoughts and more in this great episode.</p>]]>
      </description>
      <content:encoded>
        <![CDATA[<p>Gilman Louie is CEO and co-founder of America’s Frontier Fund, CEO of In-Q-Tel, and an NSCAI Commissioner. Tune into this week’s episode of <em>In AI We Trust</em>, where Gilman shares his thoughts on the government's role in regulating, funding, and convening key stakeholders to promote responsible AI. Gilman invokes similar moments of technological innovation in our history to contextualize the opportunity in the U.S. at this moment to set the standards in the AI race; and considers the challenges that derive from our “click economy”. Hear these thoughts and more in this great episode.</p>]]>
      </content:encoded>
      <pubDate>Wed, 24 May 2023 09:10:12 -0400</pubDate>
      <author>Miriam Vogel</author>
      <enclosure url="https://media.transistor.fm/a608ce69/23b56067.mp3" length="51292309" type="audio/mpeg"/>
      <itunes:author>Miriam Vogel</itunes:author>
      <itunes:duration>3204</itunes:duration>
      <itunes:summary>
        <![CDATA[<p>Gilman Louie is CEO and co-founder of America’s Frontier Fund, CEO of In-Q-Tel, and an NSCAI Commissioner. Tune into this week’s episode of <em>In AI We Trust</em>, where Gilman shares his thoughts on the government's role in regulating, funding, and convening key stakeholders to promote responsible AI. Gilman invokes similar moments of technological innovation in our history to contextualize the opportunity in the U.S. at this moment to set the standards in the AI race; and considers the challenges that derive from our “click economy”. Hear these thoughts and more in this great episode.</p>]]>
      </itunes:summary>
      <itunes:keywords>technology, AI Governance, AI, Tech, Artificial Intelligence, AI literacy, AI Policy</itunes:keywords>
      <itunes:explicit>No</itunes:explicit>
    </item>
    <item>
      <title>Rep. Chrissy Houlahan (D-PA): How do we prepare Congress for the age of AI?</title>
      <itunes:episode>70</itunes:episode>
      <podcast:episode>70</podcast:episode>
      <itunes:title>Rep. Chrissy Houlahan (D-PA): How do we prepare Congress for the age of AI?</itunes:title>
      <itunes:episodeType>full</itunes:episodeType>
      <guid isPermaLink="false">d8fc1983-99f7-428a-b86d-584a2061e410</guid>
      <link>https://share.transistor.fm/s/fa6a6cd3</link>
      <description>
        <![CDATA[<p>Meet one of the Bad A#%* women in Congress, Representative Chrissy Houlahan (D-PA). She is a trailblazer: a strong advocate for and accomplished practitioner in STEAM (science, technology, engineering, art and math) as an engineer, Air Force veteran, successful entrepreneur and former chemistry teacher. This week on <em>In AI We Trust?</em> Miriam Vogel and special guest co-host Victoria Espinel of #BSA ask Representative Houlahan to share her unique perspective on why – and how – Congress must do more to support our veterans, women, entrepreneurship and how this relates to her work in Congress on AI policy.</p>]]>
      </description>
      <content:encoded>
        <![CDATA[<p>Meet one of the Bad A#%* women in Congress, Representative Chrissy Houlahan (D-PA). She is a trailblazer: a strong advocate for and accomplished practitioner in STEAM (science, technology, engineering, art and math) as an engineer, Air Force veteran, successful entrepreneur and former chemistry teacher. This week on <em>In AI We Trust?</em> Miriam Vogel and special guest co-host Victoria Espinel of #BSA ask Representative Houlahan to share her unique perspective on why – and how – Congress must do more to support our veterans, women, entrepreneurship and how this relates to her work in Congress on AI policy.</p>]]>
      </content:encoded>
      <pubDate>Wed, 03 May 2023 05:00:00 -0400</pubDate>
      <author>Miriam Vogel</author>
      <enclosure url="https://media.transistor.fm/fa6a6cd3/93913152.mp3" length="28836039" type="audio/mpeg"/>
      <itunes:author>Miriam Vogel</itunes:author>
      <itunes:duration>1801</itunes:duration>
      <itunes:summary>
        <![CDATA[<p>Meet one of the Bad A#%* women in Congress, Representative Chrissy Houlahan (D-PA). She is a trailblazer: a strong advocate for and accomplished practitioner in STEAM (science, technology, engineering, art and math) as an engineer, Air Force veteran, successful entrepreneur and former chemistry teacher. This week on <em>In AI We Trust?</em> Miriam Vogel and special guest co-host Victoria Espinel of #BSA ask Representative Houlahan to share her unique perspective on why – and how – Congress must do more to support our veterans, women, entrepreneurship and how this relates to her work in Congress on AI policy.</p>]]>
      </itunes:summary>
      <itunes:keywords>technology, AI Governance, AI, Tech, Artificial Intelligence, AI literacy, AI Policy</itunes:keywords>
      <itunes:explicit>No</itunes:explicit>
    </item>
    <item>
      <title>Dr. Haniyeh Mahmoudian (DataRobot): Who should be involved in AI ethics?</title>
      <itunes:episode>69</itunes:episode>
      <podcast:episode>69</podcast:episode>
      <itunes:title>Dr. Haniyeh Mahmoudian (DataRobot): Who should be involved in AI ethics?</itunes:title>
      <itunes:episodeType>full</itunes:episodeType>
      <guid isPermaLink="false">9a0e8fb9-062d-436f-b098-255dd6dbcfe8</guid>
      <link>https://share.transistor.fm/s/ddcbc675</link>
      <description>
        <![CDATA[<p>In this episode of <em>In AI We Trust?</em> Dr. Haniyeh Mahmoudian, Global AI Ethicist at DataRobot, provides insight into the timely and critical role of an AI ethicist. Haniyeh explains how culture is a key element of responsible AI development. She also reflects on the questions to ask in advance of designing an AI model and the importance of engaging multiple stakeholders to design AI effectively. Tune in to this episode to learn these and other insights from an industry thought leader.</p><p><br>—</p><p><strong>Resources mentioned in this episode:</strong></p><ul><li><a href="https://towardsdatascience.com/how-to-tackle-ai-bias-ec39313ccacf">How to Tackle AI Bias</a> (Haniyeh Mahmoudian, PhD)</li></ul>]]>
      </description>
      <content:encoded>
        <![CDATA[<p>In this episode of <em>In AI We Trust?</em> Dr. Haniyeh Mahmoudian, Global AI Ethicist at DataRobot, provides insight into the timely and critical role of an AI ethicist. Haniyeh explains how culture is a key element of responsible AI development. She also reflects on the questions to ask in advance of designing an AI model and the importance of engaging multiple stakeholders to design AI effectively. Tune in to this episode to learn these and other insights from an industry thought leader.</p><p><br>—</p><p><strong>Resources mentioned in this episode:</strong></p><ul><li><a href="https://towardsdatascience.com/how-to-tackle-ai-bias-ec39313ccacf">How to Tackle AI Bias</a> (Haniyeh Mahmoudian, PhD)</li></ul>]]>
      </content:encoded>
      <pubDate>Wed, 26 Apr 2023 05:00:00 -0400</pubDate>
      <author>Miriam Vogel</author>
      <enclosure url="https://media.transistor.fm/ddcbc675/a602b189.mp3" length="40016445" type="audio/mpeg"/>
      <itunes:author>Miriam Vogel</itunes:author>
      <itunes:duration>2500</itunes:duration>
      <itunes:summary>
        <![CDATA[<p>In this episode of <em>In AI We Trust?</em> Dr. Haniyeh Mahmoudian, Global AI Ethicist at DataRobot, provides insight into the timely and critical role of an AI ethicist. Haniyeh explains how culture is a key element of responsible AI development. She also reflects on the questions to ask in advance of designing an AI model and the importance of engaging multiple stakeholders to design AI effectively. Tune in to this episode to learn these and other insights from an industry thought leader.</p><p><br>—</p><p><strong>Resources mentioned in this episode:</strong></p><ul><li><a href="https://towardsdatascience.com/how-to-tackle-ai-bias-ec39313ccacf">How to Tackle AI Bias</a> (Haniyeh Mahmoudian, PhD)</li></ul>]]>
      </itunes:summary>
      <itunes:keywords>technology, AI Governance, AI, Tech, Artificial Intelligence, AI literacy, AI Policy</itunes:keywords>
      <itunes:explicit>No</itunes:explicit>
    </item>
    <item>
      <title>Justin Hotard (Hewlett Packard Enterprise): Are local communities and data the key to unlocking better AI? </title>
      <itunes:episode>68</itunes:episode>
      <podcast:episode>68</podcast:episode>
      <itunes:title>Justin Hotard (Hewlett Packard Enterprise): Are local communities and data the key to unlocking better AI? </itunes:title>
      <itunes:episodeType>full</itunes:episodeType>
      <guid isPermaLink="false">29241fad-b089-446e-a7f0-7aa08bc36784</guid>
      <link>https://share.transistor.fm/s/3cf4075b</link>
      <description>
        <![CDATA[<p>Justin Hotard leads the High Performance Computing (HPC) &amp; AI business group at Hewlett Packard Enterprise (HPE). Tune in to <em>In AI we Trust?</em> this week as he discusses supercomputing, HPE’s commitment to open source models for global standardization and using responsible data to ensure responsible AI. </p><p>–</p><p><strong>Resources mentioned in this episode:</strong></p><ul><li><a href="https://www.weforum.org/agenda/2023/01/how-supercomputers-are-changing-the-world-as-we-know-it/">What are supercomputers and why are they important? An expert explains</a> (Justin Hotard &amp; the World Economic Forum)</li><li><a href="https://www.hpe.com/us/en/newsroom/blog-post/2022/05/fueling-ai-for-good-with-supercomputing.html">Fueling AI for good with supercomputing</a> (Justin Hotard &amp; HPE)</li><li><a href="https://www.hpe.com/us/en/newsroom/press-release/2022/04/hewlett-packard-enterprise-ushers-in-next-era-in-ai-innovation-with-swarm-learning-solution-built-for-the-edge-and-distributed-sites.html">Hewlett Packard Enterprise ushers in next era in AI innovation with Swarm Learning solution built for the edge and distributed sites</a> (HPE)</li></ul>]]>
      </description>
      <content:encoded>
        <![CDATA[<p>Justin Hotard leads the High Performance Computing (HPC) &amp; AI business group at Hewlett Packard Enterprise (HPE). Tune in to <em>In AI we Trust?</em> this week as he discusses supercomputing, HPE’s commitment to open source models for global standardization and using responsible data to ensure responsible AI. </p><p>–</p><p><strong>Resources mentioned in this episode:</strong></p><ul><li><a href="https://www.weforum.org/agenda/2023/01/how-supercomputers-are-changing-the-world-as-we-know-it/">What are supercomputers and why are they important? An expert explains</a> (Justin Hotard &amp; the World Economic Forum)</li><li><a href="https://www.hpe.com/us/en/newsroom/blog-post/2022/05/fueling-ai-for-good-with-supercomputing.html">Fueling AI for good with supercomputing</a> (Justin Hotard &amp; HPE)</li><li><a href="https://www.hpe.com/us/en/newsroom/press-release/2022/04/hewlett-packard-enterprise-ushers-in-next-era-in-ai-innovation-with-swarm-learning-solution-built-for-the-edge-and-distributed-sites.html">Hewlett Packard Enterprise ushers in next era in AI innovation with Swarm Learning solution built for the edge and distributed sites</a> (HPE)</li></ul>]]>
      </content:encoded>
      <pubDate>Wed, 05 Apr 2023 09:05:30 -0400</pubDate>
      <author>Miriam Vogel</author>
      <enclosure url="https://media.transistor.fm/3cf4075b/f6484e4d.mp3" length="38644827" type="audio/mpeg"/>
      <itunes:author>Miriam Vogel</itunes:author>
      <itunes:duration>2414</itunes:duration>
      <itunes:summary>
        <![CDATA[<p>Justin Hotard leads the High Performance Computing (HPC) &amp; AI business group at Hewlett Packard Enterprise (HPE). Tune in to <em>In AI we Trust?</em> this week as he discusses supercomputing, HPE’s commitment to open source models for global standardization and using responsible data to ensure responsible AI. </p><p>–</p><p><strong>Resources mentioned in this episode:</strong></p><ul><li><a href="https://www.weforum.org/agenda/2023/01/how-supercomputers-are-changing-the-world-as-we-know-it/">What are supercomputers and why are they important? An expert explains</a> (Justin Hotard &amp; the World Economic Forum)</li><li><a href="https://www.hpe.com/us/en/newsroom/blog-post/2022/05/fueling-ai-for-good-with-supercomputing.html">Fueling AI for good with supercomputing</a> (Justin Hotard &amp; HPE)</li><li><a href="https://www.hpe.com/us/en/newsroom/press-release/2022/04/hewlett-packard-enterprise-ushers-in-next-era-in-ai-innovation-with-swarm-learning-solution-built-for-the-edge-and-distributed-sites.html">Hewlett Packard Enterprise ushers in next era in AI innovation with Swarm Learning solution built for the edge and distributed sites</a> (HPE)</li></ul>]]>
      </itunes:summary>
      <itunes:keywords>technology, AI Governance, AI, Tech, Artificial Intelligence, AI literacy, AI Policy</itunes:keywords>
      <itunes:explicit>No</itunes:explicit>
    </item>
    <item>
      <title>Jordan Crenshaw (U.S. Chamber of Commerce): Can your company survive without AI adoption?</title>
      <itunes:episode>67</itunes:episode>
      <podcast:episode>67</podcast:episode>
      <itunes:title>Jordan Crenshaw (U.S. Chamber of Commerce): Can your company survive without AI adoption?</itunes:title>
      <itunes:episodeType>full</itunes:episodeType>
      <guid isPermaLink="false">4f4fd642-ac44-4d3e-8833-14cefcf607bc</guid>
      <link>https://share.transistor.fm/s/80426926</link>
      <description>
        <![CDATA[<p>Based on the testimony of 87 witnesses from 5 field hearings across the US, the U.S. Chamber of Commerce bipartisan AI Commission on Competition, Inclusion, and Innovation released a report yesterday, addressing the state of AI. Tune in this week to hear the U.S. Chamber’s Technology Engagement Center (C_TEC) VP, Jordan Crenshaw share key takeaways from this and other recent C_TEC reports, why tech issues are business issues, the importance of digitizing government data, and the critical impact of tech on small businesses. </p><p><br>—</p><p><br></p><p>Materials mentioned in this episode:</p><p><a href="https://www.uschamber.com/technology/artificial-intelligence-commission-report"><strong>The U.S. Chamber’s AI Commission report</strong></a><strong> (U.S. Chamber of Commerce)</strong></p><p><a href="https://urldefense.proofpoint.com/v2/url?u=https-3A__www2.deloitte.com_us_en_pages_consulting_articles_investing-2Din-2Dai-2Dtrust.html&amp;d=DwMFaQ&amp;c=NxS7LVD4EucgUR9_G6bWzuqhmQ0xEJ2AZdqjz4WaSHU&amp;r=B7ZOiliriza76RYLXP93n3u-GoSA3z6riZ_8oCAMbEA&amp;m=1m9zdOqIQ_UdLrLV-LvtelCvl_wifhmvAqTtDs_lAFalRBjijImRrlgnAOr6BthU&amp;s=9fOP2DHFrzolz1_vEf0OxRqSv-cWLXo4gFCtPVABvgc&amp;e="><strong>Investing in Trustworthy AI</strong></a><strong> (U.S. Chamber of Commerce &amp; Deloitte)</strong></p><p><a href="https://urldefense.proofpoint.com/v2/url?u=https-3A__www.uschamber.com_technology_us-2Dchamber-2Dreleases-2Dartificial-2Dintelligence-2Dprinciples&amp;d=DwMFaQ&amp;c=NxS7LVD4EucgUR9_G6bWzuqhmQ0xEJ2AZdqjz4WaSHU&amp;r=B7ZOiliriza76RYLXP93n3u-GoSA3z6riZ_8oCAMbEA&amp;m=1m9zdOqIQ_UdLrLV-LvtelCvl_wifhmvAqTtDs_lAFalRBjijImRrlgnAOr6BthU&amp;s=Z7sThUVa0fzu4sKgBdsVqtZ8inA-4nBKSpfhVdEyZAQ&amp;e="><strong>U.S. Chamber Artificial Intelligence Principles</strong></a><strong> (U.S. 
Chamber of Commerce)</strong></p><p><a href="https://urldefense.proofpoint.com/v2/url?u=https-3A__americaninnovators.com_empowering-2Dsmall-2Dbusiness_&amp;d=DwMFaQ&amp;c=NxS7LVD4EucgUR9_G6bWzuqhmQ0xEJ2AZdqjz4WaSHU&amp;r=B7ZOiliriza76RYLXP93n3u-GoSA3z6riZ_8oCAMbEA&amp;m=1m9zdOqIQ_UdLrLV-LvtelCvl_wifhmvAqTtDs_lAFalRBjijImRrlgnAOr6BthU&amp;s=DUEcBuKAZYr6sHKzkDic28y8cgIjdyQV5qldHBaxaqE&amp;e="><strong>Impact of Technology on U.S. Small Businesses </strong></a><strong>(U.S. Chamber of Commerce Technology Engagement Center)</strong></p>]]>
      </description>
      <content:encoded>
        <![CDATA[<p>Based on the testimony of 87 witnesses from 5 field hearings across the US, the U.S. Chamber of Commerce bipartisan AI Commission on Competition, Inclusion, and Innovation released a report yesterday, addressing the state of AI. Tune in this week to hear the U.S. Chamber’s Technology Engagement Center (C_TEC) VP, Jordan Crenshaw share key takeaways from this and other recent C_TEC reports, why tech issues are business issues, the importance of digitizing government data, and the critical impact of tech on small businesses. </p><p><br>—</p><p><br></p><p>Materials mentioned in this episode:</p><p><a href="https://www.uschamber.com/technology/artificial-intelligence-commission-report"><strong>The U.S. Chamber’s AI Commission report</strong></a><strong> (U.S. Chamber of Commerce)</strong></p><p><a href="https://urldefense.proofpoint.com/v2/url?u=https-3A__www2.deloitte.com_us_en_pages_consulting_articles_investing-2Din-2Dai-2Dtrust.html&amp;d=DwMFaQ&amp;c=NxS7LVD4EucgUR9_G6bWzuqhmQ0xEJ2AZdqjz4WaSHU&amp;r=B7ZOiliriza76RYLXP93n3u-GoSA3z6riZ_8oCAMbEA&amp;m=1m9zdOqIQ_UdLrLV-LvtelCvl_wifhmvAqTtDs_lAFalRBjijImRrlgnAOr6BthU&amp;s=9fOP2DHFrzolz1_vEf0OxRqSv-cWLXo4gFCtPVABvgc&amp;e="><strong>Investing in Trustworthy AI</strong></a><strong> (U.S. Chamber of Commerce &amp; Deloitte)</strong></p><p><a href="https://urldefense.proofpoint.com/v2/url?u=https-3A__www.uschamber.com_technology_us-2Dchamber-2Dreleases-2Dartificial-2Dintelligence-2Dprinciples&amp;d=DwMFaQ&amp;c=NxS7LVD4EucgUR9_G6bWzuqhmQ0xEJ2AZdqjz4WaSHU&amp;r=B7ZOiliriza76RYLXP93n3u-GoSA3z6riZ_8oCAMbEA&amp;m=1m9zdOqIQ_UdLrLV-LvtelCvl_wifhmvAqTtDs_lAFalRBjijImRrlgnAOr6BthU&amp;s=Z7sThUVa0fzu4sKgBdsVqtZ8inA-4nBKSpfhVdEyZAQ&amp;e="><strong>U.S. Chamber Artificial Intelligence Principles</strong></a><strong> (U.S. 
Chamber of Commerce)</strong></p><p><a href="https://urldefense.proofpoint.com/v2/url?u=https-3A__americaninnovators.com_empowering-2Dsmall-2Dbusiness_&amp;d=DwMFaQ&amp;c=NxS7LVD4EucgUR9_G6bWzuqhmQ0xEJ2AZdqjz4WaSHU&amp;r=B7ZOiliriza76RYLXP93n3u-GoSA3z6riZ_8oCAMbEA&amp;m=1m9zdOqIQ_UdLrLV-LvtelCvl_wifhmvAqTtDs_lAFalRBjijImRrlgnAOr6BthU&amp;s=DUEcBuKAZYr6sHKzkDic28y8cgIjdyQV5qldHBaxaqE&amp;e="><strong>Impact of Technology on U.S. Small Businesses </strong></a><strong>(U.S. Chamber of Commerce Technology Engagement Center)</strong></p>]]>
      </content:encoded>
      <pubDate>Fri, 10 Mar 2023 05:00:00 -0500</pubDate>
      <author>Miriam Vogel</author>
      <enclosure url="https://media.transistor.fm/80426926/044b0f59.mp3" length="36512371" type="audio/mpeg"/>
      <itunes:author>Miriam Vogel</itunes:author>
      <itunes:duration>2281</itunes:duration>
      <itunes:summary>
        <![CDATA[<p>Based on the testimony of 87 witnesses from 5 field hearings across the US, the U.S. Chamber of Commerce bipartisan AI Commission on Competition, Inclusion, and Innovation released a report yesterday, addressing the state of AI. Tune in this week to hear the U.S. Chamber’s Technology Engagement Center (C_TEC) VP, Jordan Crenshaw share key takeaways from this and other recent C_TEC reports, why tech issues are business issues, the importance of digitizing government data, and the critical impact of tech on small businesses. </p><p><br>—</p><p><br></p><p>Materials mentioned in this episode:</p><p><a href="https://www.uschamber.com/technology/artificial-intelligence-commission-report"><strong>The U.S. Chamber’s AI Commission report</strong></a><strong> (U.S. Chamber of Commerce)</strong></p><p><a href="https://urldefense.proofpoint.com/v2/url?u=https-3A__www2.deloitte.com_us_en_pages_consulting_articles_investing-2Din-2Dai-2Dtrust.html&amp;d=DwMFaQ&amp;c=NxS7LVD4EucgUR9_G6bWzuqhmQ0xEJ2AZdqjz4WaSHU&amp;r=B7ZOiliriza76RYLXP93n3u-GoSA3z6riZ_8oCAMbEA&amp;m=1m9zdOqIQ_UdLrLV-LvtelCvl_wifhmvAqTtDs_lAFalRBjijImRrlgnAOr6BthU&amp;s=9fOP2DHFrzolz1_vEf0OxRqSv-cWLXo4gFCtPVABvgc&amp;e="><strong>Investing in Trustworthy AI</strong></a><strong> (U.S. Chamber of Commerce &amp; Deloitte)</strong></p><p><a href="https://urldefense.proofpoint.com/v2/url?u=https-3A__www.uschamber.com_technology_us-2Dchamber-2Dreleases-2Dartificial-2Dintelligence-2Dprinciples&amp;d=DwMFaQ&amp;c=NxS7LVD4EucgUR9_G6bWzuqhmQ0xEJ2AZdqjz4WaSHU&amp;r=B7ZOiliriza76RYLXP93n3u-GoSA3z6riZ_8oCAMbEA&amp;m=1m9zdOqIQ_UdLrLV-LvtelCvl_wifhmvAqTtDs_lAFalRBjijImRrlgnAOr6BthU&amp;s=Z7sThUVa0fzu4sKgBdsVqtZ8inA-4nBKSpfhVdEyZAQ&amp;e="><strong>U.S. Chamber Artificial Intelligence Principles</strong></a><strong> (U.S. 
Chamber of Commerce)</strong></p><p><a href="https://urldefense.proofpoint.com/v2/url?u=https-3A__americaninnovators.com_empowering-2Dsmall-2Dbusiness_&amp;d=DwMFaQ&amp;c=NxS7LVD4EucgUR9_G6bWzuqhmQ0xEJ2AZdqjz4WaSHU&amp;r=B7ZOiliriza76RYLXP93n3u-GoSA3z6riZ_8oCAMbEA&amp;m=1m9zdOqIQ_UdLrLV-LvtelCvl_wifhmvAqTtDs_lAFalRBjijImRrlgnAOr6BthU&amp;s=DUEcBuKAZYr6sHKzkDic28y8cgIjdyQV5qldHBaxaqE&amp;e="><strong>Impact of Technology on U.S. Small Businesses </strong></a><strong>(U.S. Chamber of Commerce Technology Engagement Center)</strong></p>]]>
      </itunes:summary>
      <itunes:keywords>technology, AI Governance, AI, Tech, Artificial Intelligence, AI literacy, AI Policy</itunes:keywords>
      <itunes:explicit>No</itunes:explicit>
    </item>
    <item>
      <title>Elham Tabassi and Reva Schwartz (NIST): What's the big deal about the NIST AI Risk Management Framework (AI RMF)?</title>
      <itunes:episode>66</itunes:episode>
      <podcast:episode>66</podcast:episode>
      <itunes:title>Elham Tabassi and Reva Schwartz (NIST): What's the big deal about the NIST AI Risk Management Framework (AI RMF)?</itunes:title>
      <itunes:episodeType>full</itunes:episodeType>
      <guid isPermaLink="false">77a848ac-61f4-4ad2-915d-ce5830741bd7</guid>
      <link>https://share.transistor.fm/s/0075a513</link>
      <description>
        <![CDATA[<p>Elham Tabassi and Reva Schwartz – two AI leaders from the National Institute of Standards and Technology (NIST) – join us this week to discuss the AI Risk Management Framework #AIRMF released on January 26th thanks to the herculean efforts of our guests. Tune in to find out why Miriam Vogel and Kay Firth-Butterfield believe the AI RMF will be game changing. Learn the purpose behind the AI RMF; the emblematic 18-month multi (multi)-stakeholder, transparent process to design it; how they made it ‘evergreen’ at a time when our AI progress is moving at a lightning speed pace and much more.</p><p>—</p><p><br></p><p>Materials mentioned in this episode:</p><p><a href="https://www.nist.gov/itl/ai-risk-management-framework">AI Risk Management Framework</a>, (NIST)</p><p><a href="https://pages.nist.gov/AIRMF/">NIST AI Risk Management Framework Playbook</a>, (NIST)</p><p><a href="https://www.nist.gov/itl/ai-risk-management-framework/perspectives-about-nist-artificial-intelligence-risk-management">Perspectives about the NIST Artificial Intelligence Risk Management Framework</a>, (NIST)</p>]]>
      </description>
      <content:encoded>
        <![CDATA[<p>Elham Tabassi and Reva Schwartz – two AI leaders from the National Institute of Standards and Technology (NIST) – join us this week to discuss the AI Risk Management Framework #AIRMF released on January 26th thanks to the herculean efforts of our guests. Tune in to find out why Miriam Vogel and Kay Firth-Butterfield believe the AI RMF will be game changing. Learn the purpose behind the AI RMF; the emblematic 18-month multi (multi)-stakeholder, transparent process to design it; how they made it ‘evergreen’ at a time when our AI progress is moving at a lightning speed pace and much more.</p><p>—</p><p><br></p><p>Materials mentioned in this episode:</p><p><a href="https://www.nist.gov/itl/ai-risk-management-framework">AI Risk Management Framework</a>, (NIST)</p><p><a href="https://pages.nist.gov/AIRMF/">NIST AI Risk Management Framework Playbook</a>, (NIST)</p><p><a href="https://www.nist.gov/itl/ai-risk-management-framework/perspectives-about-nist-artificial-intelligence-risk-management">Perspectives about the NIST Artificial Intelligence Risk Management Framework</a>, (NIST)</p>]]>
      </content:encoded>
      <pubDate>Mon, 06 Feb 2023 13:30:00 -0500</pubDate>
      <author>Miriam Vogel</author>
      <enclosure url="https://media.transistor.fm/0075a513/9c14dfb3.mp3" length="48360180" type="audio/mpeg"/>
      <itunes:author>Miriam Vogel</itunes:author>
      <itunes:duration>3021</itunes:duration>
      <itunes:summary>Elham Tabassi and Reva Schwartz – two AI leaders from the National Institute of Standards and Technology (NIST) – join us this week to discuss the AI Risk Management Framework #AIRMF released on January 26th thanks to the herculean efforts of our guests. Tune in to find out why Miriam Vogel and Kay Firth-Butterfield believe the AI RMF will be game changing. Learn the purpose behind the AI RMF; the emblematic 18-month multi (multi)-stakeholder, transparent process to design it; how they made it ‘evergreen’ at a time when our AI progress is moving at a lightning speed pace and much more.</itunes:summary>
      <itunes:subtitle>Elham Tabassi and Reva Schwartz – two AI leaders from the National Institute of Standards and Technology (NIST) – join us this week to discuss the AI Risk Management Framework #AIRMF released on January 26th thanks to the herculean efforts of our guests. </itunes:subtitle>
      <itunes:keywords>technology, AI Governance, AI, Tech, Artificial Intelligence, AI literacy, AI Policy</itunes:keywords>
      <itunes:explicit>No</itunes:explicit>
    </item>
    <item>
      <title>Davos in Review: Should we hit 'pause' on generative AI?</title>
      <itunes:episode>65</itunes:episode>
      <podcast:episode>65</podcast:episode>
      <itunes:title>Davos in Review: Should we hit 'pause' on generative AI?</itunes:title>
      <itunes:episodeType>full</itunes:episodeType>
      <guid isPermaLink="false">a7dd87f5-2fa8-4d9c-81ab-86ee5f020ee0</guid>
      <link>https://share.transistor.fm/s/dae4e33f</link>
      <description>
        <![CDATA[<p>The annual World Economic Forum (WEF) at Davos gathers leading thinkers in government, business and civil society annually to discuss current global economic and social challenges. This week, listen to WEF Executive Committee Member, our own co-host Kay Firth-Butterfield, and Miriam Vogel discuss why this was Kay's “best Davos yet”. Not surprisingly, generative AI and ChatGPT were among the hottest topics. Learn insights gleaned on generative AI’s power and limitations, the key role that investors play in development and deployment of responsible AI, and how AI can predict wildfires and help fight the climate crisis. Leave a 5 star rating!</p><p>—</p><p>Davos discussions and materials mentioned in this episode:</p><p><a href="https://www.weforum.org/events/world-economic-forum-annual-meeting-2023/sessions/a-conversation-with-satya-nadella-ceo-of-microsoft">A Conversation with Satya Nadella, CEO of Microsoft</a></p><p><br></p><p><a href="https://www.weforum.org/events/world-economic-forum-annual-meeting-2023/sessions/generative-ai">Generative AI</a></p><p><br></p><p><a href="https://www.weforum.org/events/world-economic-forum-annual-meeting-2023/sessions/responsible-investment-in-ai-technologies">Investing in AI, with Care</a></p><p><br></p><p><a href="https://www.weforum.org/events/world-economic-forum-annual-meeting-2023/sessions/ai-for-climate-adaptation">AI for Climate Adaptation</a></p><p><br></p><p><a href="https://www.weforum.org/events/world-economic-forum-annual-meeting-2023/sessions/how-ai-fights-wildfires">How AI Fights Wildfires</a></p><p><br></p><p><a href="https://www.weforum.org/press/2023/01/satya-nadella-says-ai-golden-age-is-here-and-it-s-good-for-humanity">Satya Nadella Says AI Golden Age Is Here and ‘It’s Good for Humanity’</a></p><p><br></p><p><a href="https://www.weforum.org/agenda/2023/01/davos23-biggest-ai-developments-how-to-use-them/">These were the biggest AI developments in 2022. 
Now we must decide how to use them</a>, (Kay Firth-Butterfield)</p>]]>
      </description>
      <content:encoded>
        <![CDATA[<p>The annual World Economic Forum (WEF) at Davos gathers leading thinkers in government, business and civil society annually to discuss current global economic and social challenges. This week, listen to WEF Executive Committee Member, our own co-host Kay Firth-Butterfield, and Miriam Vogel discuss why this was Kay's “best Davos yet”. Not surprisingly, generative AI and ChatGPT were among the hottest topics. Learn insights gleaned on generative AI’s power and limitations, the key role that investors play in development and deployment of responsible AI, and how AI can predict wildfires and help fight the climate crisis. Leave a 5 star rating!</p><p>—</p><p>Davos discussions and materials mentioned in this episode:</p><p><a href="https://www.weforum.org/events/world-economic-forum-annual-meeting-2023/sessions/a-conversation-with-satya-nadella-ceo-of-microsoft">A Conversation with Satya Nadella, CEO of Microsoft</a></p><p><br></p><p><a href="https://www.weforum.org/events/world-economic-forum-annual-meeting-2023/sessions/generative-ai">Generative AI</a></p><p><br></p><p><a href="https://www.weforum.org/events/world-economic-forum-annual-meeting-2023/sessions/responsible-investment-in-ai-technologies">Investing in AI, with Care</a></p><p><br></p><p><a href="https://www.weforum.org/events/world-economic-forum-annual-meeting-2023/sessions/ai-for-climate-adaptation">AI for Climate Adaptation</a></p><p><br></p><p><a href="https://www.weforum.org/events/world-economic-forum-annual-meeting-2023/sessions/how-ai-fights-wildfires">How AI Fights Wildfires</a></p><p><br></p><p><a href="https://www.weforum.org/press/2023/01/satya-nadella-says-ai-golden-age-is-here-and-it-s-good-for-humanity">Satya Nadella Says AI Golden Age Is Here and ‘It’s Good for Humanity’</a></p><p><br></p><p><a href="https://www.weforum.org/agenda/2023/01/davos23-biggest-ai-developments-how-to-use-them/">These were the biggest AI developments in 2022. 
Now we must decide how to use them</a>, (Kay Firth-Butterfield)</p>]]>
      </content:encoded>
      <pubDate>Wed, 01 Feb 2023 19:00:00 -0500</pubDate>
      <author>Miriam Vogel</author>
      <enclosure url="https://media.transistor.fm/dae4e33f/89bab80d.mp3" length="33224158" type="audio/mpeg"/>
      <itunes:author>Miriam Vogel</itunes:author>
      <itunes:duration>2075</itunes:duration>
      <itunes:summary>The annual World Economic Forum (WEF) at Davos gathers leading thinkers in government, business and civil society annually to discuss current global economic and social challenges. This week, listen to WEF Executive Committee Member, our own co-host Kay Firth-Butterfield, and Miriam Vogel discuss why this was Kay's “best Davos yet”. Not surprisingly, generative AI and ChatGPT were among the hottest topics. Learn insights gleaned on generative AI’s power and limitations, the key role that investors play in development and deployment of responsible AI, and how AI can predict wildfires and help fight the climate crisis.</itunes:summary>
      <itunes:subtitle>The annual World Economic Forum (WEF) at Davos gathers leading thinkers in government, business and civil society annually to discuss current global economic and social challenges. This week, listen to WEF Executive Committee Member, our own co-host Kay F</itunes:subtitle>
      <itunes:keywords>technology, AI Governance, AI, Tech, Artificial Intelligence, AI literacy, AI Policy</itunes:keywords>
      <itunes:explicit>No</itunes:explicit>
    </item>
    <item>
      <title>Dr. Stuart Russell (UC Berkeley): Are we living in an AGI world?</title>
      <itunes:episode>64</itunes:episode>
      <podcast:episode>64</podcast:episode>
      <itunes:title>Dr. Stuart Russell (UC Berkeley): Are we living in an AGI world?</itunes:title>
      <itunes:episodeType>full</itunes:episodeType>
      <guid isPermaLink="false">b8fa64ef-3269-4d9d-8c67-abef752ded39</guid>
      <link>https://share.transistor.fm/s/deca2e46</link>
      <description>
        <![CDATA[<p>Dr. Stuart Russell (CS Prof, UC Berkeley) has kept us current on AI developments for decades and in this week’s episode, prepares us for the headlines we’ll hear about this week @Davos and in the coming year. He shares his thoughts and concerns on ChatGPT, Lethal Autonomous Weapons Systems, how the future of work might look through an AI lens, and a human compatible design for AI. Listen to this episode here and subscribe to ensure you catch other important upcoming discussions.</p><p>—</p><p><br></p><p>Materials mentioned in this episode:</p><p><a href="https://www.weforum.org/agenda/davos-2023">Davos 2023, the World Economic Forum</a><br><a href="https://www.weforum.org/podcasts/radio-davos">Radio Davos, A World Economic Forum Podcast</a></p>]]>
      </description>
      <content:encoded>
        <![CDATA[<p>Dr. Stuart Russell (CS Prof, UC Berkeley) has kept us current on AI developments for decades and in this week’s episode, prepares us for the headlines we’ll hear about this week @Davos and in the coming year. He shares his thoughts and concerns on ChatGPT, Lethal Autonomous Weapons Systems, how the future of work might look through an AI lens, and a human compatible design for AI. Listen to this episode here and subscribe to ensure you catch other important upcoming discussions.</p><p>—</p><p><br></p><p>Materials mentioned in this episode:</p><p><a href="https://www.weforum.org/agenda/davos-2023">Davos 2023, the World Economic Forum</a><br><a href="https://www.weforum.org/podcasts/radio-davos">Radio Davos, A World Economic Forum Podcast</a></p>]]>
      </content:encoded>
      <pubDate>Wed, 18 Jan 2023 10:30:00 -0500</pubDate>
      <author>Miriam Vogel</author>
      <enclosure url="https://media.transistor.fm/deca2e46/b67d591f.mp3" length="49037378" type="audio/mpeg"/>
      <itunes:author>Miriam Vogel</itunes:author>
      <itunes:duration>3063</itunes:duration>
      <itunes:summary>Dr. Stuart Russell (CS Prof, UC Berkeley) has kept us current on AI developments for decades and in this week’s episode, prepares us for the headlines we’ll hear about this week @Davos and in the coming year. He shares his thoughts and concerns on ChatGPT, Lethal Autonomous Weapons Systems, how the future of work might look through an AI lens, and a human compatible design for AI. Listen to this episode here and subscribe to ensure you catch other important upcoming discussions.</itunes:summary>
      <itunes:subtitle>Dr. Stuart Russell (CS Prof, UC Berkeley) has kept us current on AI developments for decades and in this week’s episode, prepares us for the headlines we’ll hear about this week @Davos and in the coming year. He shares his thoughts and concerns on ChatGPT</itunes:subtitle>
      <itunes:keywords>technology, AI Governance, AI, Tech, Artificial Intelligence, AI literacy, AI Policy</itunes:keywords>
      <itunes:explicit>No</itunes:explicit>
    </item>
    <item>
      <title>2022 Year in Review: Are we ready for what’s coming in AI?</title>
      <itunes:episode>63</itunes:episode>
      <podcast:episode>63</podcast:episode>
      <itunes:title>2022 Year in Review: Are we ready for what’s coming in AI?</itunes:title>
      <itunes:episodeType>full</itunes:episodeType>
      <guid isPermaLink="false">b279a795-866a-485e-9ce5-84da7a98e1a6</guid>
      <link>https://share.transistor.fm/s/e293336b</link>
      <description>
        <![CDATA[<p>In this special year-in-review edition of "In AI we Trust?", co-hosts Kay Firth-Butterfield (@KayFButterfield) and Miriam Vogel (@VogelMiriam) take a look back at the key themes and insights from their conversations. From interviews with thought leaders, government officials and senior executives in the field, we explore progress and challenges from the past year in the quest for trustworthy AI. We also look ahead to what you can expect to see and encounter, including key issues that are likely to emerge in AI in 2023. Join us as we reflect and gear up for an exciting year in the accelerated path toward game-changing and responsible AI.</p><p><br>—</p><p><br>Materials mentioned in this episode:</p><p><a href="https://www.weforum.org/agenda/davos-2023"><br>Davos 2023, the World Economic Forum</a></p><p><a href="https://www.washingtonpost.com/dc-md-va/2022/12/28/beyer-student-artificial-intelligence-degree/"><br>A 72-year-old congressman goes back to school, pursuing a degree in AI</a>, <em>The Washington Post</em></p><p><a href="https://corpgov.law.harvard.edu/2022/01/05/board-responsibility-for-artificial-intelligence-oversight/"><br>Board Responsibility for Artificial Intelligence Oversight</a>, Miriam Vogel and Robert G. Eccles, <em>Harvard Law School Forum on Corporate Governance<br></em><br></p><p><a href="https://www.weforum.org/agenda/2022/07/5-governance-tips-for-responsible-ai/">5 ways to avoid artificial intelligence bias with 'responsible AI'</a>, Miriam Vogel and Kay Firth-Butterfield</p>]]>
      </description>
      <content:encoded>
        <![CDATA[<p>In this special year-in-review edition of "In AI we Trust?", co-hosts Kay Firth-Butterfield (@KayFButterfield) and Miriam Vogel (@VogelMiriam) take a look back at the key themes and insights from their conversations. From interviews with thought leaders, government officials and senior executives in the field, we explore progress and challenges from the past year in the quest for trustworthy AI. We also look ahead to what you can expect to see and encounter, including key issues that are likely to emerge in AI in 2023. Join us as we reflect and gear up for an exciting year in the accelerated path toward game-changing and responsible AI.</p><p><br>—</p><p><br>Materials mentioned in this episode:</p><p><a href="https://www.weforum.org/agenda/davos-2023"><br>Davos 2023, the World Economic Forum</a></p><p><a href="https://www.washingtonpost.com/dc-md-va/2022/12/28/beyer-student-artificial-intelligence-degree/"><br>A 72-year-old congressman goes back to school, pursuing a degree in AI</a>, <em>The Washington Post</em></p><p><a href="https://corpgov.law.harvard.edu/2022/01/05/board-responsibility-for-artificial-intelligence-oversight/"><br>Board Responsibility for Artificial Intelligence Oversight</a>, Miriam Vogel and Robert G. Eccles, <em>Harvard Law School Forum on Corporate Governance<br></em><br></p><p><a href="https://www.weforum.org/agenda/2022/07/5-governance-tips-for-responsible-ai/">5 ways to avoid artificial intelligence bias with 'responsible AI'</a>, Miriam Vogel and Kay Firth-Butterfield</p>]]>
      </content:encoded>
      <pubDate>Wed, 11 Jan 2023 15:30:00 -0500</pubDate>
      <author>Miriam Vogel</author>
      <enclosure url="https://media.transistor.fm/e293336b/961a2617.mp3" length="32552542" type="audio/mpeg"/>
      <itunes:author>Miriam Vogel</itunes:author>
      <itunes:duration>2033</itunes:duration>
      <itunes:summary>In this special year-in-review edition of "In AI we Trust?", co-hosts Kay Firth-Butterfield (@KayFButterfield) and Miriam Vogel (@VogelMiriam) take a look back at the key themes and insights from their conversations. From interviews with thought leaders, government officials and senior executives in the field, we explore progress and challenges from the past year in the quest for trustworthy AI. We also look ahead to what you can expect to see and encounter, including key issues that are likely to emerge in AI in 2023. Join us as we reflect and gear up for an exciting year in the accelerated path toward game-changing and responsible AI.</itunes:summary>
      <itunes:subtitle>In this special year-in-review edition of "In AI we Trust?", co-hosts Kay Firth-Butterfield (@KayFButterfield) and Miriam Vogel (@VogelMiriam) take a look back at the key themes and insights from their conversations. From interviews with thought leaders, </itunes:subtitle>
      <itunes:keywords>technology, AI Governance, AI, Tech, Artificial Intelligence, AI literacy, AI Policy</itunes:keywords>
      <itunes:explicit>No</itunes:explicit>
    </item>
    <item>
      <title>Dr. Suresh Venkatasubramanian (White House OSTP/Brown University): Can AI be as safe as our seatbelts?</title>
      <itunes:episode>62</itunes:episode>
      <podcast:episode>62</podcast:episode>
      <itunes:title>Dr. Suresh Venkatasubramanian (White House OSTP/Brown University): Can AI be as safe as our seatbelts?</itunes:title>
      <itunes:episodeType>full</itunes:episodeType>
      <guid isPermaLink="false">b4c629e1-65da-44f9-aeda-c4f2534646f9</guid>
      <link>https://share.transistor.fm/s/9fb770f2</link>
      <description>
        <![CDATA[<p>In this episode, we are joined by Dr. Suresh Venkatasubramanian, a former official at the White House Office of Science and Technology Policy (OSTP) and CS professor at Brown, to discuss his work in the White House developing policy, including the AI Bill of Rights Blueprint. Suresh also posits on the basis for current AI challenges as failure of imagination, the need to engage diverse voices in AI development, and the evolution of safety regulations for new technologies. </p><p>—</p><p>Materials mentioned in this episode:<br><a href="https://www.whitehouse.gov/wp-content/uploads/2022/10/Blueprint-for-an-AI-Bill-of-Rights.pdf">Blueprint for an AI Bill of Rights</a> (The White House)</p>]]>
      </description>
      <content:encoded>
        <![CDATA[<p>In this episode, we are joined by Dr. Suresh Venkatasubramanian, a former official at the White House Office of Science and Technology Policy (OSTP) and CS professor at Brown, to discuss his work in the White House developing policy, including the AI Bill of Rights Blueprint. Suresh also posits on the basis for current AI challenges as failure of imagination, the need to engage diverse voices in AI development, and the evolution of safety regulations for new technologies. </p><p>—</p><p>Materials mentioned in this episode:<br><a href="https://www.whitehouse.gov/wp-content/uploads/2022/10/Blueprint-for-an-AI-Bill-of-Rights.pdf">Blueprint for an AI Bill of Rights</a> (The White House)</p>]]>
      </content:encoded>
      <pubDate>Mon, 19 Dec 2022 05:00:00 -0500</pubDate>
      <author>Miriam Vogel</author>
      <enclosure url="https://media.transistor.fm/9fb770f2/8c62332a.mp3" length="44180336" type="audio/mpeg"/>
      <itunes:author>Miriam Vogel</itunes:author>
      <itunes:duration>2760</itunes:duration>
      <itunes:summary>In this episode, we are joined by Dr. Suresh Venkatasubramanian, a former official at the White House Office of Science and Technology Policy (OSTP) and CS professor at Brown, to discuss his work in the White House developing policy, including the AI Bill of Rights Blueprint. Suresh also posits on the basis for current AI challenges as failure of imagination, the need to engage diverse voices in AI development, and the evolution of safety regulations for new technologies.</itunes:summary>
      <itunes:subtitle>In this episode, we are joined by Dr. Suresh Venkatasubramanian, a former official at the White House Office of Science and Technology Policy (OSTP) and CS professor at Brown, to discuss his work in the White House developing policy, including the AI Bill</itunes:subtitle>
      <itunes:keywords>technology, AI Governance, AI, Tech, Artificial Intelligence, AI literacy, AI Policy</itunes:keywords>
      <itunes:explicit>No</itunes:explicit>
    </item>
    <item>
      <title>Joaquin Quiñonero Candela (LinkedIn): Can we meet business goals AND attain responsible AI? (spoiler: we can and must)</title>
      <itunes:episode>61</itunes:episode>
      <podcast:episode>61</podcast:episode>
      <itunes:title>Joaquin Quiñonero Candela (LinkedIn): Can we meet business goals AND attain responsible AI? (spoiler: we can and must)</itunes:title>
      <itunes:episodeType>full</itunes:episodeType>
      <guid isPermaLink="false">e83b7463-a069-416b-8214-df1399cd29fd</guid>
      <link>https://share.transistor.fm/s/44ab68c6</link>
      <description>
        <![CDATA[<p>This week, Joaquin Quiñonero Candela (LinkedIn, formerly at Facebook and Microsoft) joins us to discuss AI storytelling; ethics by design; the imperative of diversity to create effective AI; and strategies he uses to make responsible AI a priority for the engineers he manages, policy-makers he advises, and other important stakeholders.</p><p><br>—</p><p><br>Materials mentioned in this episode:</p><p><a href="https://www.belfercenter.org/publication/technology-primer-social-media-recommendation-algorithms"><br>Technology Primer: Social Media Recommendation Algorithms</a> (Harvard Belfer Center)</p><p><a href="https://www.belfercenter.org/event/finding-solutions-choice-control-and-content-policies"><br>Finding Solutions: Choice, Control, and Content Policies</a>; a conversation between Karen Hao and Joaquin Quiñonero Candela hosted live by the Harvard Belfer Center</p>]]>
      </description>
      <content:encoded>
        <![CDATA[<p>This week, Joaquin Quiñonero Candela (LinkedIn, formerly at Facebook and Microsoft) joins us to discuss AI storytelling; ethics by design; the imperative of diversity to create effective AI; and strategies he uses to make responsible AI a priority for the engineers he manages, policy-makers he advises, and other important stakeholders.</p><p><br>—</p><p><br>Materials mentioned in this episode:</p><p><a href="https://www.belfercenter.org/publication/technology-primer-social-media-recommendation-algorithms"><br>Technology Primer: Social Media Recommendation Algorithms</a> (Harvard Belfer Center)</p><p><a href="https://www.belfercenter.org/event/finding-solutions-choice-control-and-content-policies"><br>Finding Solutions: Choice, Control, and Content Policies</a>; a conversation between Karen Hao and Joaquin Quiñonero Candela hosted live by the Harvard Belfer Center</p>]]>
      </content:encoded>
      <pubDate>Wed, 07 Dec 2022 05:00:00 -0500</pubDate>
      <author>Miriam Vogel</author>
      <enclosure url="https://media.transistor.fm/44ab68c6/ab5486af.mp3" length="42203977" type="audio/mpeg"/>
      <itunes:author>Miriam Vogel</itunes:author>
      <itunes:duration>2636</itunes:duration>
      <itunes:summary>This week, Joaquin Quiñonero Candela (LinkedIn, formerly at Facebook and Microsoft) joins us to discuss AI storytelling; ethics by design; the imperative of diversity to create effective AI; and strategies he uses to make responsible AI a priority for the engineers he manages, policy-makers he advises, and other important stakeholders.</itunes:summary>
      <itunes:subtitle>This week, Joaquin Quiñonero Candela (LinkedIn, formerly at Facebook and Microsoft) joins us to discuss AI storytelling; ethics by design; the imperative of diversity to create effective AI; and strategies he uses to make responsible AI a priority for the</itunes:subtitle>
      <itunes:keywords>technology, AI Governance, AI, Tech, Artificial Intelligence, AI literacy, AI Policy</itunes:keywords>
      <itunes:explicit>No</itunes:explicit>
    </item>
    <item>
      <title>Deputy Secretary Graves (DOC) answers the question: Can We Maintain Our AI Lead? (spoiler alert: We are AI Ready!)</title>
      <itunes:episode>60</itunes:episode>
      <podcast:episode>60</podcast:episode>
      <itunes:title>Deputy Secretary Graves (DOC) answers the question: Can We Maintain Our AI Lead? (spoiler alert: We are AI Ready!)</itunes:title>
      <itunes:episodeType>full</itunes:episodeType>
      <guid isPermaLink="false">e1538ad6-aa48-4027-8c83-9f3e2b715261</guid>
      <link>https://share.transistor.fm/s/822a053e</link>
      <description>
        <![CDATA[<p>The Department of Commerce plays a key role in the USG’s leadership in AI given the multiple ways AI is used, patented and governed by the Department. In this special episode, hear from Commerce Deputy Secretary Don Graves on how the US intends to maintain leadership in AI, including through its creation of standards to attain trustworthy AI, working with our allies and ensuring an inclusive and ready AI workforce. </p><p><br>—</p><p><br>Materials mentioned in this episode:</p><p><br><a href="https://www.weforum.org/press/2022/11/proposed-law-enforcement-principles-on-the-responsible-use-of-facial-recognition-technology-released/">Proposed Law Enforcement Principles on the Responsible Use of Facial Recognition Technology Released from the World Economic Forum</a> </p><p><a href="https://www.fisheries.noaa.gov/new-england-mid-atlantic/science-data/artificial-intelligence-detecting-marine-animals-satellites">Artificial Intelligence: Detecting Marine Animals with Satellites (NOAA Fisheries)</a></p>]]>
      </description>
      <content:encoded>
        <![CDATA[<p>The Department of Commerce plays a key role in the USG’s leadership in AI given the multiple ways AI is used, patented and governed by the Department. In this special episode, hear from Commerce Deputy Secretary Don Graves on how the US intends to maintain leadership in AI, including through its creation of standards to attain trustworthy AI, working with our allies and ensuring an inclusive and ready AI workforce. </p><p><br>—</p><p><br>Materials mentioned in this episode:</p><p><br><a href="https://www.weforum.org/press/2022/11/proposed-law-enforcement-principles-on-the-responsible-use-of-facial-recognition-technology-released/">Proposed Law Enforcement Principles on the Responsible Use of Facial Recognition Technology Released from the World Economic Forum</a> </p><p><a href="https://www.fisheries.noaa.gov/new-england-mid-atlantic/science-data/artificial-intelligence-detecting-marine-animals-satellites">Artificial Intelligence: Detecting Marine Animals with Satellites (NOAA Fisheries)</a></p>]]>
      </content:encoded>
      <pubDate>Wed, 16 Nov 2022 05:00:00 -0500</pubDate>
      <author>Miriam Vogel</author>
      <enclosure url="https://media.transistor.fm/822a053e/f2a2cbf7.mp3" length="36856765" type="audio/mpeg"/>
      <itunes:author>Miriam Vogel</itunes:author>
      <itunes:duration>2302</itunes:duration>
      <itunes:summary>The Department of Commerce plays a key role in the USG’s leadership in AI given the multiple ways AI is used, patented, and governed by the Department. In this special episode, hear from Commerce Deputy Secretary Don Graves on how the US intends to maintain leadership in AI, including through its creation of standards to attain trustworthy AI, working with our allies and ensuring an inclusive and ready AI workforce.</itunes:summary>
      <itunes:subtitle>The Department of Commerce plays a key role in the USG’s leadership in AI given the multiple ways AI is used, patented, and governed by the Department. In this special episode, hear from Commerce Deputy Secretary Don Graves on how the US intends to mainta</itunes:subtitle>
      <itunes:keywords>technology, AI Governance, AI, Tech, Artificial Intelligence, AI literacy, AI Policy</itunes:keywords>
      <itunes:explicit>No</itunes:explicit>
    </item>
    <item>
      <title>Carl Hahn (NOC): When your AI reaches from the cosmos to the seafloor, and the universe in between, how can you ensure it is safe and trustworthy?</title>
      <itunes:episode>59</itunes:episode>
      <podcast:episode>59</podcast:episode>
      <itunes:title>Carl Hahn (NOC): When your AI reaches from the cosmos to the seafloor, and the universe in between, how can you ensure it is safe and trustworthy?</itunes:title>
      <itunes:episodeType>full</itunes:episodeType>
      <guid isPermaLink="false">d6c955ba-5694-49d6-a6f3-13db1437bfb4</guid>
      <link>https://share.transistor.fm/s/f4b382dd</link>
      <description>
        <![CDATA[<p>Carl Hahn, Vice President and Chief compliance officer at Northrop Grumman, one of the world’s largest military technology providers, joins us on this episode to help answer this question that he addresses daily. Carl shares his perspective on the impact of the DoD principles, how governments and companies need to align on the “how” of developing and using AI responsibly, and much more. </p><p>---------------</p><p>Materials mentioned in this episode:</p><p><a href="https://www.youtube.com/c/NIST/videos">NAIAC Field Hearing @ NIST YouTube Page</a></p><p><a href="https://www.defense.gov/News/News-Stories/Article/Article/2094085/dod-adopts-5-principles-of-artificial-intelligence-ethics/">“DOD Adopts 5 Principles of Artificial Intelligence Ethics” (Department of Defense)</a></p><p><a href="https://www.northropgrumman.com/what-we-do/defense-ai-technology-worlds-apart-from-commercial-ai/">“Defense AI Technology: Worlds Apart From Commercial AI” (Northrop Grumman)</a></p><p><a href="https://www.smarttoyawards.org/">Smart Toys (World Economic Forum): Smart Toy Awards</a></p>]]>
      </description>
      <content:encoded>
        <![CDATA[<p>Carl Hahn, Vice President and Chief compliance officer at Northrop Grumman, one of the world’s largest military technology providers, joins us on this episode to help answer this question that he addresses daily. Carl shares his perspective on the impact of the DoD principles, how governments and companies need to align on the “how” of developing and using AI responsibly, and much more. </p><p>---------------</p><p>Materials mentioned in this episode:</p><p><a href="https://www.youtube.com/c/NIST/videos">NAIAC Field Hearing @ NIST YouTube Page</a></p><p><a href="https://www.defense.gov/News/News-Stories/Article/Article/2094085/dod-adopts-5-principles-of-artificial-intelligence-ethics/">“DOD Adopts 5 Principles of Artificial Intelligence Ethics” (Department of Defense)</a></p><p><a href="https://www.northropgrumman.com/what-we-do/defense-ai-technology-worlds-apart-from-commercial-ai/">“Defense AI Technology: Worlds Apart From Commercial AI” (Northrop Grumman)</a></p><p><a href="https://www.smarttoyawards.org/">Smart Toys (World Economic Forum): Smart Toy Awards</a></p>]]>
      </content:encoded>
      <pubDate>Wed, 02 Nov 2022 05:00:00 -0400</pubDate>
      <author>Miriam Vogel</author>
      <enclosure url="https://media.transistor.fm/f4b382dd/f923a888.mp3" length="42616665" type="audio/mpeg"/>
      <itunes:author>Miriam Vogel</itunes:author>
      <itunes:duration>2662</itunes:duration>
      <itunes:summary>Carl Hahn, Vice President and Chief compliance officer at Northrop Grumman, one of the world’s largest military technology providers, joins us on this episode to help answer this question that he addresses daily. Carl shares his perspective on the impact of the DoD principles, how governments and companies need to align on the “how” of developing and using AI responsibly, and much more.</itunes:summary>
      <itunes:subtitle>Carl Hahn, Vice President and Chief compliance officer at Northrop Grumman, one of the world’s largest military technology providers, joins us on this episode to help answer this question that he addresses daily. Carl shares his perspective on the impact </itunes:subtitle>
      <itunes:keywords>technology, AI Governance, AI, Tech, Artificial Intelligence, AI literacy, AI Policy</itunes:keywords>
      <itunes:explicit>No</itunes:explicit>
    </item>
    <item>
      <title>Mark Brayan (Appen): For whom is your data performing?</title>
      <itunes:episode>58</itunes:episode>
      <podcast:episode>58</podcast:episode>
      <itunes:title>Mark Brayan (Appen): For whom is your data performing?</itunes:title>
      <itunes:episodeType>full</itunes:episodeType>
      <guid isPermaLink="false">2f326987-3d71-44eb-b823-f4af12b529b5</guid>
      <link>https://share.transistor.fm/s/1ee1935f</link>
      <description>
        <![CDATA[<p>In this episode, Mark Brayan focuses on a key ingredient for responsible AI: ethically sourced, inclusive data. Mark is the CEO and Managing director of Appen, which provides training data for thousands of machine learning and AI initiatives. Good quality data is imperative for responsible AI (garbage in, garbage out), and part of that equation is making sure that it is sourced inclusively, responsibly, and ethically. When developing and using responsible AI, it’s critically important to get your data right by asking the right questions; for whom is your data performing – and for whom could it fail?</p><p>— Subscribe to catch each new episode on Apple, Spotify and all major platforms. To learn more about EqualAI, visit our website: <a href="https://www.equalai.org/">https://www.equalai.org/</a> and follow us on Twitter: <a href="https://twitter.com/ai_equal?lang=en">@ai_equal</a>.</p>]]>
      </description>
      <content:encoded>
        <![CDATA[<p>In this episode, Mark Brayan focuses on a key ingredient for responsible AI: ethically sourced, inclusive data. Mark is the CEO and Managing director of Appen, which provides training data for thousands of machine learning and AI initiatives. Good quality data is imperative for responsible AI (garbage in, garbage out), and part of that equation is making sure that it is sourced inclusively, responsibly, and ethically. When developing and using responsible AI, it’s critically important to get your data right by asking the right questions; for whom is your data performing – and for whom could it fail?</p><p>— Subscribe to catch each new episode on Apple, Spotify and all major platforms. To learn more about EqualAI, visit our website: <a href="https://www.equalai.org/">https://www.equalai.org/</a> and follow us on Twitter: <a href="https://twitter.com/ai_equal?lang=en">@ai_equal</a>.</p>]]>
      </content:encoded>
      <pubDate>Wed, 12 Oct 2022 05:00:00 -0400</pubDate>
      <author>Miriam Vogel</author>
      <enclosure url="https://media.transistor.fm/1ee1935f/32b44b66.mp3" length="27728791" type="audio/mpeg"/>
      <itunes:author>Miriam Vogel</itunes:author>
      <itunes:duration>1732</itunes:duration>
      <itunes:summary>In this episode, Mark Brayan focuses on a key ingredient for responsible AI: ethically sourced, inclusive data. Mark is the CEO and Managing director of Appen, which provides training data for thousands of machine learning and AI initiatives. Good quality data is imperative for responsible AI (garbage in, garbage out), and part of that equation is making sure that it is sourced inclusively, responsibly, and ethically. When developing and using responsible AI, it’s critically important to get your data right by asking the right questions; for whom is your data performing – and for whom could it fail?</itunes:summary>
      <itunes:subtitle>In this episode, Mark Brayan focuses on a key ingredient for responsible AI: ethically sourced, inclusive data. Mark is the CEO and Managing director of Appen, which provides training data for thousands of machine learning and AI initiatives. Good quality</itunes:subtitle>
      <itunes:keywords>technology, AI Governance, AI, Tech, Artificial Intelligence, AI literacy, AI Policy</itunes:keywords>
      <itunes:explicit>No</itunes:explicit>
    </item>
    <item>
      <title>Krishnaram Kenthapadi (Fiddler.ai): Citizen audits are coming; are you ready?</title>
      <itunes:episode>57</itunes:episode>
      <podcast:episode>57</podcast:episode>
      <itunes:title>Krishnaram Kenthapadi (Fiddler.ai): Citizen audits are coming; are you ready?</itunes:title>
      <itunes:episodeType>full</itunes:episodeType>
      <guid isPermaLink="false">8f1cc70b-954c-42e2-b13d-39f068c0fd26</guid>
      <link>https://share.transistor.fm/s/5f72552d</link>
      <description>
        <![CDATA[<p>Krishnaram is the Chief Scientist of Fiddler AI, an enterprise startup building a responsible AI and Machine Learning monitoring platform. Prior to Fiddler AI, Krishnaram has served as Principal Scientist at Amazon AWS AI, on the LinkedIn AI team, and on Microsoft's AI and Ethics in Engineering and Research (AETHER) Advisory Board. In this episode, Krishnaram warns of the importance of not simply performing the important task of model validation but continuing to test it post deployment. He also highlights incentives to test your AI early and often: even without new laws in place, empowered and tech-savvy citizens are increasingly taking audits into their own hands.</p><p>— Subscribe to catch each new episode on Apple, Spotify and all major platforms. To learn more about EqualAI, visit our website: <a href="https://www.equalai.org/">https://www.equalai.org/</a> and follow us on Twitter: <a href="https://twitter.com/ai_equal?lang=en">@ai_equal</a>.</p>]]>
      </description>
      <content:encoded>
        <![CDATA[<p>Krishnaram is the Chief Scientist of Fiddler AI, an enterprise startup building a responsible AI and Machine Learning monitoring platform. Prior to Fiddler AI, Krishnaram has served as Principal Scientist at Amazon AWS AI, on the LinkedIn AI team, and on Microsoft's AI and Ethics in Engineering and Research (AETHER) Advisory Board. In this episode, Krishnaram warns of the importance of not simply performing the important task of model validation but continuing to test it post deployment. He also highlights incentives to test your AI early and often: even without new laws in place, empowered and tech-savvy citizens are increasingly taking audits into their own hands.</p><p>— Subscribe to catch each new episode on Apple, Spotify and all major platforms. To learn more about EqualAI, visit our website: <a href="https://www.equalai.org/">https://www.equalai.org/</a> and follow us on Twitter: <a href="https://twitter.com/ai_equal?lang=en">@ai_equal</a>.</p>]]>
      </content:encoded>
      <pubDate>Wed, 28 Sep 2022 05:00:00 -0400</pubDate>
      <author>Miriam Vogel</author>
      <enclosure url="https://media.transistor.fm/5f72552d/aa1a4291.mp3" length="42594111" type="audio/mpeg"/>
      <itunes:author>Miriam Vogel</itunes:author>
      <itunes:duration>2661</itunes:duration>
      <itunes:summary>Krishnaram is the Chief Scientist of Fiddler AI, an enterprise startup building a responsible AI and Machine Learning monitoring platform. Prior to Fiddler AI, Krishnaram has served as Principal Scientist at Amazon AWS AI, on the LinkedIn AI team, and on Microsoft's AI and Ethics in Engineering and Research (AETHER) Advisory Board. In this episode, Krishnaram warns of the importance of not simply performing the important task of model validation but continuing to test it post-deployment. He also highlights incentives to test your AI early and often: even without new laws in place, empowered and tech-savvy citizens are increasingly taking audits into their own hands.</itunes:summary>
      <itunes:subtitle>Krishnaram is the Chief Scientist of Fiddler AI, an enterprise startup building a responsible AI and Machine Learning monitoring platform. Prior to Fiddler AI, Krishnaram has served as Principal Scientist at Amazon AWS AI, on the LinkedIn AI team, and on </itunes:subtitle>
      <itunes:keywords>technology, AI Governance, AI, Tech, Artificial Intelligence, AI literacy, AI Policy</itunes:keywords>
      <itunes:explicit>No</itunes:explicit>
    </item>
    <item>
      <title>Dr. Edson Prestes: Can we ingrain empathy into our AI?</title>
      <itunes:episode>56</itunes:episode>
      <podcast:episode>56</podcast:episode>
      <itunes:title>Dr. Edson Prestes: Can we ingrain empathy into our AI?</itunes:title>
      <itunes:episodeType>full</itunes:episodeType>
      <guid isPermaLink="false">1b71f83f-7627-4fc5-b745-937da7818154</guid>
      <link>https://share.transistor.fm/s/c57ffe0e</link>
      <description>
        <![CDATA[<p>Dr. Prestes, Professor of Computer Science at the Institute of Informatics, Federal University of Rio Grande do Sul and leader of the Phi Robotics Research Group. In this episode, Dr. Prestes shares his trailblazing work in international AI policy and standards, including the development of the first global AI ethics instrument. Dr. Prestes discusses ethics in technology and the infusion of empathy, as well as his focus on establishing human rights for a digital world.<br> <br>— Subscribe to catch each new episode on Apple, Spotify and all major platforms. To learn more about EqualAI, visit our website: <a href="https://www.equalai.org/">https://www.equalai.org/</a> and follow us on Twitter: <a href="https://twitter.com/ai_equal?lang=en">@ai_equal</a>.</p>]]>
      </description>
      <content:encoded>
        <![CDATA[<p>Dr. Prestes, Professor of Computer Science at the Institute of Informatics, Federal University of Rio Grande do Sul and leader of the Phi Robotics Research Group. In this episode, Dr. Prestes shares his trailblazing work in international AI policy and standards, including the development of the first global AI ethics instrument. Dr. Prestes discusses ethics in technology and the infusion of empathy, as well as his focus on establishing human rights for a digital world.<br> <br>— Subscribe to catch each new episode on Apple, Spotify and all major platforms. To learn more about EqualAI, visit our website: <a href="https://www.equalai.org/">https://www.equalai.org/</a> and follow us on Twitter: <a href="https://twitter.com/ai_equal?lang=en">@ai_equal</a>.</p>]]>
      </content:encoded>
      <pubDate>Wed, 14 Sep 2022 05:00:00 -0400</pubDate>
      <author>Miriam Vogel</author>
      <enclosure url="https://media.transistor.fm/c57ffe0e/e8201089.mp3" length="43720061" type="audio/mpeg"/>
      <itunes:author>Miriam Vogel</itunes:author>
      <itunes:duration>2731</itunes:duration>
      <itunes:summary>Dr. Prestes, Professor of Computer Science at the Institute of Informatics, Federal University of Rio Grande do Sul and leader of the Phi Robotics Research Group. In this episode, Dr. Prestes shares his trailblazing work in international AI policy and standards, including the development of the first global AI ethics instrument. Dr. Prestes discusses ethics in technology and the infusion of empathy, as well as his focus on establishing human rights for a digital world.</itunes:summary>
      <itunes:subtitle>Dr. Prestes, Professor of Computer Science at the Institute of Informatics, Federal University of Rio Grande do Sul and leader of the Phi Robotics Research Group. In this episode, Dr. Prestes shares his trailblazing work in international AI policy and sta</itunes:subtitle>
      <itunes:keywords>technology, AI Governance, AI, Tech, Artificial Intelligence, AI literacy, AI Policy</itunes:keywords>
      <itunes:explicit>No</itunes:explicit>
    </item>
    <item>
      <title>Joe Bradley (LivePerson): How much 'rat poison' is in our AI and can AI be more "human"?</title>
      <itunes:episode>55</itunes:episode>
      <podcast:episode>55</podcast:episode>
      <itunes:title>Joe Bradley (LivePerson): How much 'rat poison' is in our AI and can AI be more "human"?</itunes:title>
      <itunes:episodeType>full</itunes:episodeType>
      <guid isPermaLink="false">19f12380-d379-48e9-94bf-e953e5454248</guid>
      <link>https://share.transistor.fm/s/5eedddba</link>
      <description>
        <![CDATA[<p>Joe Bradley is the Chief Scientist at LivePerson, a leading Conversational AI company creating digital experiences that are “Curiously Human”, powering nearly a billion conversational interactions monthly in their Conversational Cloud. In this episode, Joe shares the broad lens he brings to his work in AI. He discusses the interconnectedness between AI and humanity, and his work at LivePerson to develop “empathetic” AI systems to help brands better connect with their customers. Joe addresses his experience in the EqualAI Badge program and basic challenges in reducing bias in AI, from determining what to measure to whom to consider when evaluating our systems; and asks how much “rat poison” is tolerated in our cereal (AI systems).</p><p>— Subscribe to catch each new episode on Apple, Spotify and all major platforms. To learn more about EqualAI, visit our website: <a href="https://www.equalai.org/">https://www.equalai.org/</a> and follow us on Twitter: <a href="https://twitter.com/ai_equal?lang=en">@ai_equal</a>.</p>]]>
      </description>
      <content:encoded>
        <![CDATA[<p>Joe Bradley is the Chief Scientist at LivePerson, a leading Conversational AI company creating digital experiences that are “Curiously Human”, powering nearly a billion conversational interactions monthly in their Conversational Cloud. In this episode, Joe shares the broad lens he brings to his work in AI. He discusses the interconnectedness between AI and humanity, and his work at LivePerson to develop “empathetic” AI systems to help brands better connect with their customers. Joe addresses his experience in the EqualAI Badge program and basic challenges in reducing bias in AI, from determining what to measure to whom to consider when evaluating our systems; and asks how much “rat poison” is tolerated in our cereal (AI systems).</p><p>— Subscribe to catch each new episode on Apple, Spotify and all major platforms. To learn more about EqualAI, visit our website: <a href="https://www.equalai.org/">https://www.equalai.org/</a> and follow us on Twitter: <a href="https://twitter.com/ai_equal?lang=en">@ai_equal</a>.</p>]]>
      </content:encoded>
      <pubDate>Wed, 24 Aug 2022 11:02:48 -0400</pubDate>
      <author>Miriam Vogel</author>
      <enclosure url="https://media.transistor.fm/5eedddba/dfae7819.mp3" length="50349912" type="audio/mpeg"/>
      <itunes:author>Miriam Vogel</itunes:author>
      <itunes:duration>3144</itunes:duration>
      <itunes:summary>Joe Bradley is the Chief Scientist at LivePerson, a leading Conversational AI company creating digital experiences that are “Curiously Human”, powering nearly a billion conversational interactions monthly in their Conversational Cloud. In this episode, Joe shares the broad lens he brings to his work in AI. He discusses the interconnectedness between AI and humanity, and his work at LivePerson to develop “empathetic” AI systems to help brands better connect with their customers. Joe addresses his experience in the EqualAI Badge program and basic challenges in reducing bias in AI, from determining what to measure to whom to consider when evaluating our systems; and asks how much “rat poison” is tolerated in our cereal (AI systems).</itunes:summary>
      <itunes:subtitle>Joe Bradley is the Chief Scientist at LivePerson, a leading Conversational AI company creating digital experiences that are “Curiously Human”, powering nearly a billion conversational interactions monthly in their Conversational Cloud. In this episode, Jo</itunes:subtitle>
      <itunes:keywords>technology, AI Governance, AI, Tech, Artificial Intelligence, AI literacy, AI Policy</itunes:keywords>
      <itunes:explicit>No</itunes:explicit>
    </item>
    <item>
      <title>Dr. Richard Benjamins (Telefonica): What are the key ingredients for a successful Responsible AI Framework?</title>
      <itunes:episode>54</itunes:episode>
      <podcast:episode>54</podcast:episode>
      <itunes:title>Dr. Richard Benjamins (Telefonica): What are the key ingredients for a successful Responsible AI Framework?</itunes:title>
      <itunes:episodeType>full</itunes:episodeType>
      <guid isPermaLink="false">9dc43756-1eb2-47e9-9928-b2c7f47d9b72</guid>
      <link>https://share.transistor.fm/s/0e6c4528</link>
      <description>
        <![CDATA[<p>Dr. Richard Benjamins is Chief AI &amp; Data Strategist at Telefonica, author of <em>The myth of the algorithm </em>and <em>A Data-Driven Company</em>, and co-founder of OdiseIA. In this week’s episode, Richard offers his roadmap for trustworthy AI, including his company's “aspirational” approach to AI governance, their use of an ethics committee, how they use the bottom line to reinforce their goals and other best practices in designing responsible AI use.</p>]]>
      </description>
      <content:encoded>
        <![CDATA[<p>Dr. Richard Benjamins is Chief AI &amp; Data Strategist at Telefonica, author of <em>The myth of the algorithm </em>and <em>A Data-Driven Company</em>, and co-founder of OdiseIA. In this week’s episode, Richard offers his roadmap for trustworthy AI, including his company's “aspirational” approach to AI governance, their use of an ethics committee, how they use the bottom line to reinforce their goals and other best practices in designing responsible AI use.</p>]]>
      </content:encoded>
      <pubDate>Wed, 15 Jun 2022 14:30:12 -0400</pubDate>
      <author>Miriam Vogel</author>
      <enclosure url="https://media.transistor.fm/0e6c4528/f8cfa82f.mp3" length="56284613" type="audio/mpeg"/>
      <itunes:author>Miriam Vogel</itunes:author>
      <itunes:duration>3513</itunes:duration>
      <itunes:summary>Dr. Richard Benjamins is Chief AI &amp; Data Strategist at Telefonica, author of The myth of the algorithm and A Data-Driven Company, and co-founder of OdiseIA. In this week’s episode, Richard offers his roadmap for trustworthy AI, including his company's “aspirational” approach to AI governance, their use of an ethics committee, how they use the bottom line to reinforce their goals and other best practices in designing responsible AI use.</itunes:summary>
      <itunes:subtitle>Dr. Richard Benjamins is Chief AI &amp; Data Strategist at Telefonica, author of The myth of the algorithm and A Data-Driven Company, and co-founder of OdiseIA. In this week’s episode, Richard offers his roadmap for trustworthy AI, including his company's</itunes:subtitle>
      <itunes:keywords>technology, AI Governance, AI, Tech, Artificial Intelligence, AI literacy, AI Policy</itunes:keywords>
      <itunes:explicit>No</itunes:explicit>
    </item>
    <item>
      <title>Beena Ammanath (Deloitte): What concrete steps companies can (must) take to achieve trustworthy AI</title>
      <itunes:episode>53</itunes:episode>
      <podcast:episode>53</podcast:episode>
      <itunes:title>Beena Ammanath (Deloitte): What concrete steps companies can (must) take to achieve trustworthy AI</itunes:title>
      <itunes:episodeType>full</itunes:episodeType>
      <guid isPermaLink="false">d323390d-452f-4850-afaf-2f8251721079</guid>
      <link>https://share.transistor.fm/s/17a88155</link>
      <description>
        <![CDATA[<p>Beena Ammanath is Executive Director of the Global Deloitte AI Institute, author of <em>Trustworthy AI: A Business Guide For Navigating Trust and Ethics in AI</em> and founder of the nonprofit to increase diversity in tech, Humans for AI. In this episode, Beena explains where organizations (and others) can begin to embed AI ethics as a part of their routine business practice, the importance for policy makers and organizations alike to focus on use cases when building frameworks, and shares other lessons on how to ensure we create more inclusive, trustworthy AI. </p>]]>
      </description>
      <content:encoded>
        <![CDATA[<p>Beena Ammanath is Executive Director of the Global Deloitte AI Institute, author of <em>Trustworthy AI: A Business Guide For Navigating Trust and Ethics in AI</em> and founder of the nonprofit to increase diversity in tech, Humans for AI. In this episode, Beena explains where organizations (and others) can begin to embed AI ethics as a part of their routine business practice, the importance for policy makers and organizations alike to focus on use cases when building frameworks, and shares other lessons on how to ensure we create more inclusive, trustworthy AI. </p>]]>
      </content:encoded>
      <pubDate>Fri, 27 May 2022 10:01:27 -0400</pubDate>
      <author>Miriam Vogel</author>
      <enclosure url="https://media.transistor.fm/17a88155/5be79da5.mp3" length="49517096" type="audio/mpeg"/>
      <itunes:author>Miriam Vogel</itunes:author>
      <itunes:duration>3091</itunes:duration>
      <itunes:summary>Beena Ammanath is Executive Director of the Global Deloitte AI Institute, author of Trustworthy AI: A Business Guide For Navigating Trust and Ethics in AI and founder of the nonprofit to increase diversity in tech, Humans for AI. In this episode, Beena explains where organizations (and others) can begin to embed AI ethics as a part of their routine business practice, the importance for policy makers and organizations alike to focus on use cases when building frameworks, and shares other lessons on how to ensure we create more inclusive, trustworthy AI. </itunes:summary>
      <itunes:subtitle>Beena Ammanath is Executive Director of the Global Deloitte AI Institute, author of Trustworthy AI: A Business Guide For Navigating Trust and Ethics in AI and founder of the nonprofit to increase diversity in tech, Humans for AI. In this episode, Beena ex</itunes:subtitle>
      <itunes:keywords>technology, AI Governance, AI, Tech, Artificial Intelligence, AI literacy, AI Policy</itunes:keywords>
      <itunes:explicit>No</itunes:explicit>
    </item>
    <item>
      <title>Dr. Margaret Mitchell: How can we ensure AI reflects our values – and why this matters to each of us?</title>
      <itunes:episode>52</itunes:episode>
      <podcast:episode>52</podcast:episode>
      <itunes:title>Dr. Margaret Mitchell: How can we ensure AI reflects our values – and why this matters to each of us?</itunes:title>
      <itunes:episodeType>full</itunes:episodeType>
      <guid isPermaLink="false">fe35d129-964e-42fb-8346-ed7799e6373c</guid>
      <link>https://share.transistor.fm/s/cba30ffb</link>
      <description>
        <![CDATA[<p>Dr. Margaret Mitchell is a renowned researcher who has won numerous awards for her work developing practical tools to combine ethics and machine learning. Last<br>Fall, Dr. Mitchell joined the AI startup <em>Hugging<br>Face (</em> "to democratize <em>good</em> machine learning") and previously research positions at Google and Microsoft. In<br>this episode, Dr. Mitchell articulates numerous<br>challenges in the endeavor to create ethical AI. She also illuminates the<br>distinction between ethical and responsible AI; the necessity of a<br>human-centered, inclusive approach to AI development; and the need for policy<br>makers to understand AI. ----- Subscribe to catch each new episode on Apple, Spotify and all major platforms. To learn more about EqualAI, visit our website: <a href="https://www.equalai.org/">https://www.equalai.org/</a> and follow us on Twitter: @ai_equal </p>]]>
      </description>
      <content:encoded>
        <![CDATA[<p>Dr. Margaret Mitchell is a renowned researcher who has won numerous awards for her work developing practical tools to combine ethics and machine learning. Last<br>Fall, Dr. Mitchell joined the AI startup <em>Hugging<br>Face (</em> "to democratize <em>good</em> machine learning") and previously research positions at Google and Microsoft. In<br>this episode, Dr. Mitchell articulates numerous<br>challenges in the endeavor to create ethical AI. She also illuminates the<br>distinction between ethical and responsible AI; the necessity of a<br>human-centered, inclusive approach to AI development; and the need for policy<br>makers to understand AI. ----- Subscribe to catch each new episode on Apple, Spotify and all major platforms. To learn more about EqualAI, visit our website: <a href="https://www.equalai.org/">https://www.equalai.org/</a> and follow us on Twitter: @ai_equal </p>]]>
      </content:encoded>
      <pubDate>Tue, 10 May 2022 15:20:41 -0400</pubDate>
      <author>Miriam Vogel</author>
      <enclosure url="https://media.transistor.fm/cba30ffb/9a245b1c.mp3" length="54369073" type="audio/mpeg"/>
      <itunes:author>Miriam Vogel</itunes:author>
      <itunes:duration>3391</itunes:duration>
      <itunes:summary>Dr. Margaret Mitchell is a renowned researcher who has won numerous awards for her work developing practical tools to combine ethics and machine learning. </itunes:summary>
      <itunes:subtitle>Dr. Margaret Mitchell is a renowned researcher who has won numerous awards for her work developing practical tools to combine ethics and machine learning. </itunes:subtitle>
      <itunes:keywords>technology, AI Governance, AI, Tech, Artificial Intelligence, AI literacy, AI Policy</itunes:keywords>
      <itunes:explicit>No</itunes:explicit>
    </item>
    <item>
      <title>Rep. Don Beyer (D-VA): Can the U.S. Congress Create Legislative Frameworks to Support AI Development (and should it)?</title>
      <itunes:episode>51</itunes:episode>
      <podcast:episode>51</podcast:episode>
      <itunes:title>Rep. Don Beyer (D-VA): Can the U.S. Congress Create Legislative Frameworks to Support AI Development (and should it)?</itunes:title>
      <itunes:episodeType>full</itunes:episodeType>
      <guid isPermaLink="false">ad7e3c22-6885-4f98-b270-24f54135c8bd</guid>
      <link>https://share.transistor.fm/s/58679860</link>
      <description>
        <![CDATA[Rep. Don Beyer (D-VA) is Chair of Congress' Joint Economic Committee and serves on the Ways and Means and the Science, Space and Technology Committees, as well as a member of the AI Caucus- and in his spare time, he is pursuing a Masters Degree in Artificial Intelligence. In this episode, Rep. Beyer explains his enthusiasm for AI and the opportunities it presents to enhance human life -- (e.g., better understanding and treating long covid and preserving life in suicide prevention)-- and the potential harms he is concerned about, as well as the ability of the US Congress to appropriately address these challenges. <p><br></p>]]>
      </description>
      <content:encoded>
        <![CDATA[Rep. Don Beyer (D-VA) is Chair of Congress' Joint Economic Committee and serves on the Ways and Means and the Science, Space and Technology Committees, as well as a member of the AI Caucus- and in his spare time, he is pursuing a Masters Degree in Artificial Intelligence. In this episode, Rep. Beyer explains his enthusiasm for AI and the opportunities it presents to enhance human life -- (e.g., better understanding and treating long covid and preserving life in suicide prevention)-- and the potential harms he is concerned about, as well as the ability of the US Congress to appropriately address these challenges. <p><br></p>]]>
      </content:encoded>
      <pubDate>Tue, 26 Apr 2022 16:26:33 -0400</pubDate>
      <author>Miriam Vogel</author>
      <enclosure url="https://media.transistor.fm/58679860/8611ecfc.mp3" length="35103258" type="audio/mpeg"/>
      <itunes:author>Miriam Vogel</itunes:author>
      <itunes:duration>2190</itunes:duration>
      <itunes:summary>Rep. Don Beyer (D-VA) is Chair of Congress' Joint Economic Committee and serves on the Ways and Means and the Science, Space and Technology Committees, as well as a member of the AI Caucus- and in his spare time, he is pursuing a Masters Degree in Artificial Intelligence. In this episode, Rep. Beyer explains his enthusiasm for AI and the opportunities it presents to enhance human life -- (e.g., better understanding and treating long covid and preserving life in suicide prevention)-- and the potential harms he is concerned about, as well as the ability of the US Congress to appropriately address these challenges. </itunes:summary>
      <itunes:subtitle>Rep. Don Beyer (D-VA) is Chair of Congress' Joint Economic Committee and serves on the Ways and Means and the Science, Space and Technology Committees, as well as a member of the AI Caucus- and in his spare time, he is pursuing a Masters Degree in Artific</itunes:subtitle>
      <itunes:keywords>technology, AI Governance, AI, Tech, Artificial Intelligence, AI literacy, AI Policy</itunes:keywords>
      <itunes:explicit>No</itunes:explicit>
    </item>
    <item>
      <title>Mira Lane (Microsoft): Can compassion lead to better AI? </title>
      <itunes:episode>50</itunes:episode>
      <podcast:episode>50</podcast:episode>
      <itunes:title>Mira Lane (Microsoft): Can compassion lead to better AI? </itunes:title>
      <itunes:episodeType>full</itunes:episodeType>
      <guid isPermaLink="false">253fe080-cd95-4e45-ad2b-0affa100a808</guid>
      <link>https://share.transistor.fm/s/8015fc80</link>
      <description>
        <![CDATA[Mira Lane, a polymath, technologist and artist, is the head of Ethics &amp; Society at Microsoft, a multidisciplinary group responsible for guiding AI innovation that leads to ethical, responsible, and sustainable outcomes. In this episode, she shares how the culture at Microsoft includes compassion in AI development to the benefit of their AI products, how she changes the perception of responsible AI from a tax to a value-add and how games can play a role in achieving this goal.----- Subscribe to catch each new episode on Apple, Spotify and all major platforms. To learn more about EqualAI, visit our website: <a href="https://www.equalai.org/">https://www.equalai.org/</a> and follow us on Twitter: @ai_equal<p><br></p>]]>
      </description>
      <content:encoded>
        <![CDATA[Mira Lane, a polymath, technologist and artist, is the head of Ethics &amp; Society at Microsoft, a multidisciplinary group responsible for guiding AI innovation that leads to ethical, responsible, and sustainable outcomes. In this episode, she shares how the culture at Microsoft includes compassion in AI development to the benefit of their AI products, how she changes the perception of responsible AI from a tax to a value-add and how games can play a role in achieving this goal.----- Subscribe to catch each new episode on Apple, Spotify and all major platforms. To learn more about EqualAI, visit our website: <a href="https://www.equalai.org/">https://www.equalai.org/</a> and follow us on Twitter: @ai_equal<p><br></p>]]>
      </content:encoded>
      <pubDate>Thu, 14 Apr 2022 12:26:59 -0400</pubDate>
      <author>Miriam Vogel</author>
      <enclosure url="https://media.transistor.fm/8015fc80/af095584.mp3" length="45647876" type="audio/mpeg"/>
      <itunes:author>Miriam Vogel</itunes:author>
      <itunes:duration>2848</itunes:duration>
      <itunes:summary>Mira Lane, a polymath, technologist and artist, is the head of Ethics &amp; Society at Microsoft, a multidisciplinary group responsible for guiding AI innovation that leads to ethical, responsible, and sustainable outcomes. In this episode, she shares how the culture at Microsoft includes compassion in AI development to the benefit of their AI products, how she changes the perception of responsible AI from a tax to a value-add and how games can play a role in achieving this goal.----- Subscribe to catch each new episode on Apple, Spotify and all major platforms. To learn more about EqualAI, visit our website: https://www.equalai.org/ and follow us on Twitter: @ai_equal</itunes:summary>
      <itunes:subtitle>Mira Lane, a polymath, technologist and artist, is the head of Ethics &amp; Society at Microsoft, a multidisciplinary group responsible for guiding AI innovation that leads to ethical, responsible, and sustainable outcomes. In this episode, she shares h</itunes:subtitle>
      <itunes:keywords>technology, AI Governance, AI, Tech, Artificial Intelligence, AI literacy, AI Policy</itunes:keywords>
      <itunes:explicit>No</itunes:explicit>
    </item>
    <item>
      <title>Dr. Athina Kanioura (PepsiCo): Is AI a Privilege Reserved for Big Tech? </title>
      <itunes:episode>49</itunes:episode>
      <podcast:episode>49</podcast:episode>
      <itunes:title>Dr. Athina Kanioura (PepsiCo): Is AI a Privilege Reserved for Big Tech? </itunes:title>
      <itunes:episodeType>full</itunes:episodeType>
      <guid isPermaLink="false">90ee8ee1-a34a-4719-a036-69773908c187</guid>
      <link>https://share.transistor.fm/s/4fa8f92d</link>
      <description>
        <![CDATA[Dr. Athina Kanioura is Chief Strategy and Transformation Officer at PepsiCo, leading their company-wide transformation in digital strategy. In this episode, Athina opens our eyes to ways that companies like PepsiCo are using AI (and equally important, where they are not). She shares challenges in undergoing a digital transformation and explains their legacy-focused approach to AI integration as a means for greater efficiency as well as instilling better sustainability practices, upskilling employees and supporting small business partners.]]>
      </description>
      <content:encoded>
        <![CDATA[Dr. Athina Kanioura is Chief Strategy and Transformation Officer at PepsiCo, leading their company-wide transformation in digital strategy. In this episode, Athina opens our eyes to ways that companies like PepsiCo are using AI (and equally important, where they are not). She shares challenges in undergoing a digital transformation and explains their legacy-focused approach to AI integration as a means for greater efficiency as well as instilling better sustainability practices, upskilling employees and supporting small business partners.]]>
      </content:encoded>
      <pubDate>Thu, 07 Apr 2022 20:16:02 -0400</pubDate>
      <author>Miriam Vogel</author>
      <enclosure url="https://media.transistor.fm/4fa8f92d/c3c97e5a.mp3" length="45528890" type="audio/mpeg"/>
      <itunes:author>Miriam Vogel</itunes:author>
      <itunes:duration>2841</itunes:duration>
      <itunes:summary>Dr. Athina Kanioura is Chief Strategy and Transformation Officer at PepsiCo, leading their company-wide transformation in digital strategy. In this episode, Athina opens our eyes to ways that companies like PepsiCo are using AI (and equally important, where they are not). She shares challenges in undergoing a digital transformation and explains their legacy-focused approach to AI integration as a means for greater efficiency as well as instilling better sustainability practices, upskilling employees and supporting small business partners.</itunes:summary>
      <itunes:subtitle>Dr. Athina Kanioura is Chief Strategy and Transformation Officer at PepsiCo, leading their company-wide transformation in digital strategy. In this episode, Athina opens our eyes to ways that companies like PepsiCo are using AI (and equally important, whe</itunes:subtitle>
      <itunes:keywords>technology, AI Governance, AI, Tech, Artificial Intelligence, AI literacy, AI Policy</itunes:keywords>
      <itunes:explicit>No</itunes:explicit>
    </item>
    <item>
      <title>Keith Sonderling, EEOC Commissioner: Does AI scale or reduce bias in the workplace?</title>
      <itunes:episode>48</itunes:episode>
      <podcast:episode>48</podcast:episode>
      <itunes:title>Keith Sonderling, EEOC Commissioner: Does AI scale or reduce bias in the workplace?</itunes:title>
      <itunes:episodeType>full</itunes:episodeType>
      <guid isPermaLink="false">e11018b0-60be-4f7c-bd1c-db4f63cadd54</guid>
      <link>https://share.transistor.fm/s/93c3441a</link>
      <description>
        <![CDATA[<p>Keith Sonderling is a Commissioner of the U.S. Equal Employment Opportunity Commission (EEOC) and helped launch the EEOC's unprecedented Initiative on Artificial Intelligence and Algorithmic Fairness in 2021. In this episode, he shares guidance for employers on building, buying and employing AI programs in HR systems and shares his optimism on the unique opportunity we have at this moment to ensure a significant, positive impact in deploying AI technology.</p><p><strong>Subscribe to catch each new episode! Find us on Apple</strong>(<a href="https://podcasts.apple.com/us/podcast/in-ai-we-trust/id1563248151">https://podcasts.apple.com/us/podcast/in-ai-we-trust/id1563248151</a>), Amazon, Spotify and all major platforms. To learn more about EqualAI, visit our website: <a href="https://www.equalai.org/">https://www.equalai.org/</a> and follow us on Twitter: @ai_equal and LinkedIn (<a href="https://www.linkedin.com/company/equalai/?viewAsMember=true">https://www.linkedin.com/company/equalai/?viewAsMember=true</a>). Find the referenced WEF Toolkit on Artificial Intelligence for Human Resources here: <a href="https://www.weforum.org/reports/human-centred-ai-for-hr-state-of-play-and-the-path-ahead#report-nav">https://www.weforum.org/reports/human-centred-ai-for-hr-state-of-play-and-the-path-ahead#report-nav</a>).</p>]]>
      </description>
      <content:encoded>
        <![CDATA[<p>Keith Sonderling is a Commissioner of the U.S. Equal Employment Opportunity Commission (EEOC) and helped launch the EEOC's unprecedented Initiative on Artificial Intelligence and Algorithmic Fairness in 2021. In this episode, he shares guidance for employers on building, buying and employing AI programs in HR systems and shares his optimism on the unique opportunity we have at this moment to ensure a significant, positive impact in deploying AI technology.</p><p><strong>Subscribe to catch each new episode! Find us on Apple</strong>(<a href="https://podcasts.apple.com/us/podcast/in-ai-we-trust/id1563248151">https://podcasts.apple.com/us/podcast/in-ai-we-trust/id1563248151</a>), Amazon, Spotify and all major platforms. To learn more about EqualAI, visit our website: <a href="https://www.equalai.org/">https://www.equalai.org/</a> and follow us on Twitter: @ai_equal and LinkedIn (<a href="https://www.linkedin.com/company/equalai/?viewAsMember=true">https://www.linkedin.com/company/equalai/?viewAsMember=true</a>). Find the referenced WEF Toolkit on Artificial Intelligence for Human Resources here: <a href="https://www.weforum.org/reports/human-centred-ai-for-hr-state-of-play-and-the-path-ahead#report-nav">https://www.weforum.org/reports/human-centred-ai-for-hr-state-of-play-and-the-path-ahead#report-nav</a>).</p>]]>
      </content:encoded>
      <pubDate>Thu, 31 Mar 2022 15:40:49 -0400</pubDate>
      <author>Miriam Vogel</author>
      <enclosure url="https://media.transistor.fm/93c3441a/6c271a72.mp3" length="49618803" type="audio/mpeg"/>
      <itunes:author>Miriam Vogel</itunes:author>
      <itunes:duration>3096</itunes:duration>
      <itunes:summary>Keith Sonderling is a Commissioner of the U.S. Equal Employment Opportunity Commission (EEOC) and helped launch the EEOC's unprecedented Initiative on Artificial Intelligence and Algorithmic Fairness in 2021. In this episode, he shares guidance for employers on building, buying and employing AI programs in HR systems and shares his optimism on the unique opportunity we have at this moment to ensure a significant, positive impact in deploying AI technology.</itunes:summary>
      <itunes:subtitle>Keith Sonderling is a Commissioner of the U.S. Equal Employment Opportunity Commission (EEOC) and helped launch the EEOC's unprecedented Initiative on Artificial Intelligence and Algorithmic Fairness in 2021. In this episode, he shares guidance for employ</itunes:subtitle>
      <itunes:keywords>technology, AI Governance, AI, Tech, Artificial Intelligence, AI literacy, AI Policy</itunes:keywords>
      <itunes:explicit>No</itunes:explicit>
    </item>
    <item>
      <title>MP Darren Jones: 'Horizon Scanning' to Design Better AI Regulation</title>
      <itunes:episode>47</itunes:episode>
      <podcast:episode>47</podcast:episode>
      <itunes:title>MP Darren Jones: 'Horizon Scanning' to Design Better AI Regulation</itunes:title>
      <itunes:episodeType>full</itunes:episodeType>
      <guid isPermaLink="false">980a0ce7-046e-46f5-9beb-036152d60566</guid>
      <link>https://share.transistor.fm/s/969c4692</link>
      <description>
        <![CDATA[Darren Jones is a Member of UK Parliament who has chaired the Parliamentary Technology Information and Communications Forum, Parliamentary Commission on Technology Ethics, and Labour Digital. Darren is also the founding chair of the Institute of AI, a global coalition of legislators interested in AI, and he is a member of the World Economic Forum (WEF) Global AI Action Alliance (GAIAA). In this episode, Darren speaks to how legislators need to 'horizon scan' and understand cutting edge tech to translate it into creating more opportunities while reducing risk through laws and regulation. He argues regulation can support 'safety by design', instead of the tendency to retrofit AI trust and safety considerations at the end of the process. 

Subscribe to catch each new episode on Apple (https://podcasts.apple.com/us/podcast/in-ai-we-trust/id1563248151), Spotify and all major platforms. To learn more about EqualAI, visit our website: https://www.equalai.org/ and follow us on Twitter: @ai_equal and LinkedIn (https://www.linkedin.com/company/equalai/?viewAsMember=true)]]>
      </description>
      <content:encoded>
        <![CDATA[Darren Jones is a Member of UK Parliament who has chaired the Parliamentary Technology Information and Communications Forum, Parliamentary Commission on Technology Ethics, and Labour Digital. Darren is also the founding chair of the Institute of AI, a global coalition of legislators interested in AI, and he is a member of the World Economic Forum (WEF) Global AI Action Alliance (GAIAA). In this episode, Darren speaks to how legislators need to 'horizon scan' and understand cutting edge tech to translate it into creating more opportunities while reducing risk through laws and regulation. He argues regulation can support 'safety by design', instead of the tendency to retrofit AI trust and safety considerations at the end of the process. 

Subscribe to catch each new episode on Apple (https://podcasts.apple.com/us/podcast/in-ai-we-trust/id1563248151), Spotify and all major platforms. To learn more about EqualAI, visit our website: https://www.equalai.org/ and follow us on Twitter: @ai_equal and LinkedIn (https://www.linkedin.com/company/equalai/?viewAsMember=true)]]>
      </content:encoded>
      <pubDate>Wed, 16 Mar 2022 22:43:50 -0400</pubDate>
      <author>Miriam Vogel</author>
      <enclosure url="https://media.transistor.fm/969c4692/b2aa5724.mp3" length="38557234" type="audio/mpeg"/>
      <itunes:author>Miriam Vogel</itunes:author>
      <itunes:duration>2406</itunes:duration>
      <itunes:summary>Darren Jones is a Member of UK Parliament who has chaired the Parliamentary Technology Information and Communications Forum, Parliamentary Commission on Technology Ethics, and Labour Digital. Darren is also the founding chair of the Institute of AI, a global coalition of legislators interested in AI, and he is a member of the World Economic Forum (WEF) Global AI Action Alliance (GAIAA). In this episode, Darren speaks to how legislators need to 'horizon scan' and understand cutting edge tech to translate it into creating more opportunities while reducing risk through laws and regulation. He argues regulation can support 'safety by design', instead of the tendency to retrofit AI trust and safety considerations at the end of the process. 

Subscribe to catch each new episode on Apple (https://podcasts.apple.com/us/podcast/in-ai-we-trust/id1563248151), Spotify and all major platforms. To learn more about EqualAI, visit our website: https://www.equalai.org/ and follow us on Twitter: @ai_equal and LinkedIn (https://www.linkedin.com/company/equalai/?viewAsMember=true)</itunes:summary>
      <itunes:subtitle>Darren Jones is Member of UK Parliament who has chaired the Parliamentary Technology Information and Communications Forum, Parliamentary Commission on Technology Ethics, and Labour Digital. Darren is also the founding chair of the Institute of AI, a globa</itunes:subtitle>
      <itunes:keywords>technology, AI Governance, AI, Tech, Artificial Intelligence, AI literacy, AI Policy</itunes:keywords>
      <itunes:explicit>No</itunes:explicit>
    </item>
    <item>
      <title>Ziad Obermeyer: A physician, academic, McKinsey alum's approach to tackling bias in AI </title>
      <itunes:episode>46</itunes:episode>
      <podcast:episode>46</podcast:episode>
      <itunes:title>Ziad Obermeyer: A physician, academic, McKinsey alum's approach to tackling bias in AI </itunes:title>
      <itunes:episodeType>full</itunes:episodeType>
      <guid isPermaLink="false">cd10a189-c7d2-4cfb-8cfd-74ba8958469b</guid>
      <link>https://share.transistor.fm/s/8b9050b5</link>
      <description>
        <![CDATA[<p>Ziad Obermeyer is a Professor of Health Policy and Management<br>at the UC Berkeley School of Public Health where he conducts research at the intersection of machine learning, medicine, and health policy. Previously, he was a professor at Harvard Medical School and consultant at McKinsey &amp; Co. He continues to practice emergency medicine in underserved parts of the US and is also a co-founder of Nightingale Open Science, a computing platform giving<br>researchers access to massive new health imaging datasets. In this episode, you'll hear how he ended up co-authoring the seminal study to identify bias in AI health systems, published in Science in 2019, and whether you should be using his Algorithmic Bias Playbook.</p><p>Links to referenced articles and playbook: <br>http://ziadobermeyer.com/research/</p><p>https://www.chicagobooth.edu/research/center-for-applied-artificial-intelligence/research/algorithmic-bias</p>]]>
      </description>
      <content:encoded>
        <![CDATA[<p>Ziad Obermeyer is a Professor of Health Policy and Management<br>at the UC Berkeley School of Public Health where he conducts research at the intersection of machine learning, medicine, and health policy. Previously, he was a professor at Harvard Medical School and consultant at McKinsey &amp; Co. He continues to practice emergency medicine in underserved parts of the US and is also a co-founder of Nightingale Open Science, a computing platform giving<br>researchers access to massive new health imaging datasets. In this episode, you'll hear how he ended up co-authoring the seminal study to identify bias in AI health systems, published in Science in 2019, and whether you should be using his Algorithmic Bias Playbook.</p><p>Links to referenced articles and playbook: <br>http://ziadobermeyer.com/research/</p><p>https://www.chicagobooth.edu/research/center-for-applied-artificial-intelligence/research/algorithmic-bias</p>]]>
      </content:encoded>
      <pubDate>Wed, 09 Mar 2022 11:40:05 -0500</pubDate>
      <author>Miriam Vogel</author>
      <enclosure url="https://media.transistor.fm/8b9050b5/29cc8f28.mp3" length="56852616" type="audio/mpeg"/>
      <itunes:author>Miriam Vogel</itunes:author>
      <itunes:duration>3549</itunes:duration>
      <itunes:summary>Ziad Obermeyer is a Professor of Health Policy and Management
at the UC Berkeley School of Public Health where he conducts research at the intersection of machine learning, medicine, and health policy. Previously, he was a professor at Harvard Medical School and consultant at McKinsey &amp; Co. He continues to practice emergency medicine in underserved parts of the US and is also a co-founder of Nightingale Open Science, a computing platform giving
researchers access to massive new health imaging datasets. In this episode, you'll hear how he ended up co-authoring the seminal study to identify bias in AI health systems, published in Science in 2019, and whether you should be using his Algorithmic Bias Playbook.

Links to referenced articles and playbook: 
http://ziadobermeyer.com/research/

https://www.chicagobooth.edu/research/center-for-applied-artificial-intelligence/research/algorithmic-bias</itunes:summary>
      <itunes:subtitle>Ziad Obermeyer is a Professor of Health Policy and Management
at the UC Berkeley School of Public Health where he conducts research at the intersection of machine learning, medicine, and health policy. Previously, he was a professor at Harvard Medical Sc</itunes:subtitle>
      <itunes:keywords>technology, AI Governance, AI, Tech, Artificial Intelligence, AI literacy, AI Policy</itunes:keywords>
      <itunes:explicit>No</itunes:explicit>
    </item>
    <item>
      <title>Jen Gennai (Google): How to Manage the Creation of Responsible AI Products for Billions</title>
      <itunes:episode>45</itunes:episode>
      <podcast:episode>45</podcast:episode>
      <itunes:title>Jen Gennai (Google): How to Manage the Creation of Responsible AI Products for Billions</itunes:title>
      <itunes:episodeType>full</itunes:episodeType>
      <guid isPermaLink="false">7d707360-7ad5-4939-bc41-9b0a31b49084</guid>
      <link>https://share.transistor.fm/s/08fe1275</link>
      <description>
        <![CDATA[Jen Gennai is Founder and Director of the Responsible Innovation Group at Google. In her current role leading the Responsible Innovation Group, Jen and her team are responsible for creating and operationalizing  Google’s AI Principles. In this episode, Jen shares what responsible AI means to her, lessons learned that inform her perspective from which we all can learn, how AI should or should not be regulated, and the AI innovations on the horizon she is excited to see come to fruition.]]>
      </description>
      <content:encoded>
        <![CDATA[Jen Gennai is Founder and Director of the Responsible Innovation Group at Google. In her current role leading the Responsible Innovation Group, Jen and her team are responsible for creating and operationalizing  Google’s AI Principles. In this episode, Jen shares what responsible AI means to her, lessons learned that inform her perspective from which we all can learn, how AI should or should not be regulated, and the AI innovations on the horizon she is excited to see come to fruition.]]>
      </content:encoded>
      <pubDate>Fri, 04 Mar 2022 00:34:24 -0500</pubDate>
      <author>Miriam Vogel</author>
      <enclosure url="https://media.transistor.fm/08fe1275/51a5e9c7.mp3" length="65843552" type="audio/mpeg"/>
      <itunes:author>Miriam Vogel</itunes:author>
      <itunes:duration>4112</itunes:duration>
      <itunes:summary>Jen Gennai is Founder and Director of the Responsible Innovation Group at Google. In her current role leading the Responsible Innovation Group, Jen and her team are responsible for creating and operationalizing  Google’s AI Principles. In this episode, Jen shares what responsible AI means to her, lessons learned that inform her perspective from which we all can learn, how AI should or should not be regulated, and the AI innovations on the horizon she is excited to see come to fruition.</itunes:summary>
      <itunes:subtitle>Jen Gennai is Founder and Director of the Responsible Innovation Group at Google. In her current role leading the Responsible Innovation Group, Jen and her team are responsible for creating and operationalizing  Google’s AI Principles. In this episode, Je</itunes:subtitle>
      <itunes:keywords>technology, AI Governance, AI, Tech, Artificial Intelligence, AI literacy, AI Policy</itunes:keywords>
      <itunes:explicit>No</itunes:explicit>
    </item>
    <item>
      <title>Ilana Golbin (PwC): Does sci-fi help or hinder AI understanding?</title>
      <itunes:episode>44</itunes:episode>
      <podcast:episode>44</podcast:episode>
      <itunes:title>Ilana Golbin (PwC): Does sci-fi help or hinder AI understanding?</itunes:title>
      <itunes:episodeType>full</itunes:episodeType>
      <guid isPermaLink="false">70f52173-a157-4e09-9a0a-3f8b49e96fa9</guid>
      <link>https://share.transistor.fm/s/344c1053</link>
      <description>
        <![CDATA[<p>Ilana Golbin is a Director in PwC Labs leading projects on Emerging Technology and AI. She is a Certified Ethical Emerging Technologist<br>and was recently recognized in Forbes as one of 15 leaders advancing Ethical AI. In this episode, Ilana shares the principles she uses to ensure confidence in AI systems used both internally, at PWC, and when advising clients. She explains some of the complexities<br>in the application of those principles, how responsible AI governance is part of a demonstration of cultural sensitivity and how sci-fi can be a helpful partner in the governance process.</p><p> ----- Subscribe to catch each new episode on Apple (<a href="https://podcasts.apple.com/us/podcast/in-ai-we-trust/id1563248151">https://podcasts.apple.com/us/podcast/in-ai-we-trust/id1563248151</a>),<br>Spotify and all major platforms. To learn more about EqualAI, visit our website: <a href="https://www.equalai.org/">https://www.equalai.org/</a><br>and follow us on Twitter: @ai_equal and LinkedIn (<a href="https://www.linkedin.com/company/equalai/?viewAsMember=true">https://www.linkedin.com/company/equalai/?viewAsMember=true</a>).</p><p><br></p>]]>
      </description>
      <content:encoded>
        <![CDATA[<p>Ilana Golbin is a Director in PwC Labs leading projects on Emerging Technology and AI. She is a Certified Ethical Emerging Technologist<br>and was recently recognized in Forbes as one of 15 leaders advancing Ethical AI. In this episode, Ilana shares the principles she uses to ensure confidence in AI systems used both internally, at PWC, and when advising clients. She explains some of the complexities<br>in the application of those principles, how responsible AI governance is part of a demonstration of cultural sensitivity and how sci-fi can be a helpful partner in the governance process.</p><p> ----- Subscribe to catch each new episode on Apple (<a href="https://podcasts.apple.com/us/podcast/in-ai-we-trust/id1563248151">https://podcasts.apple.com/us/podcast/in-ai-we-trust/id1563248151</a>),<br>Spotify and all major platforms. To learn more about EqualAI, visit our website: <a href="https://www.equalai.org/">https://www.equalai.org/</a><br>and follow us on Twitter: @ai_equal and LinkedIn (<a href="https://www.linkedin.com/company/equalai/?viewAsMember=true">https://www.linkedin.com/company/equalai/?viewAsMember=true</a>).</p><p><br></p>]]>
      </content:encoded>
      <pubDate>Thu, 24 Feb 2022 17:27:30 -0500</pubDate>
      <author>Miriam Vogel</author>
      <enclosure url="https://media.transistor.fm/344c1053/e1665da6.mp3" length="44595762" type="audio/mpeg"/>
      <itunes:author>Miriam Vogel</itunes:author>
      <itunes:duration>2783</itunes:duration>
      <itunes:summary>Ilana Golbin is a Director in PwC Labs leading projects on Emerging Technology and AI. She is a Certified Ethical Emerging Technologist
and was recently recognized in Forbes as one of 15 leaders advancing Ethical AI. In this episode, Ilana shares the principles she uses to ensure confidence in AI systems used both internally, at PWC, and when advising clients. She explains some of the complexities
in the application of those principles, how responsible AI governance is part of a demonstration of cultural sensitivity and how sci-fi can be a helpful partner in the governance process.

 ----- Subscribe to catch each new episode on Apple (https://podcasts.apple.com/us/podcast/in-ai-we-trust/id1563248151),
Spotify and all major platforms. To learn more about EqualAI, visit our website: https://www.equalai.org/
and follow us on Twitter: @ai_equal and LinkedIn (https://www.linkedin.com/company/equalai/?viewAsMember=true).</itunes:summary>
      <itunes:subtitle>Ilana Golbin is a Director in PwC Labs leading projects on Emerging Technology and AI. She is a Certified Ethical Emerging Technologist
and was recently recognized in Forbes as one of 15 leaders advancing Ethical AI. In this episode, Ilana shares the pri</itunes:subtitle>
      <itunes:keywords>technology, AI Governance, AI, Tech, Artificial Intelligence, AI literacy, AI Policy</itunes:keywords>
      <itunes:explicit>No</itunes:explicit>
    </item>
    <item>
      <title>Renee Cummings: How AI Does (&amp; Should) Impact Our BHM Celebration</title>
      <itunes:episode>43</itunes:episode>
      <podcast:episode>43</podcast:episode>
      <itunes:title>Renee Cummings: How AI Does (&amp; Should) Impact Our BHM Celebration</itunes:title>
      <itunes:episodeType>full</itunes:episodeType>
      <guid isPermaLink="false">12c76c6a-65de-40bb-842d-8808ca6e6c9f</guid>
      <link>https://share.transistor.fm/s/d24d1e30</link>
      <description>
        <![CDATA[Renée Cummings is a pioneering AI ethicist, Criminologist, Columbia University Community Scholar, and Founder of Urban AI. Her studies focus on the impact of AI on criminal justice, specifically in communities of color and incarcerated populations. In this episode, you will be inspired by Renee's insights on the impact that AI and data science has on our civil rights, how increasing diversity in AI is fundamental to creating technology that reflects our humanity, and improvements that still need to be made in areas such as trust and accountability. ----- Subscribe to catch each new episode on Apple (https://podcasts.apple.com/us/podcast/in-ai-we-trust/id1563248151), Spotify and all major platforms. To learn more about EqualAI, visit our website: https://www.equalai.org/ and follow us on Twitter: @ai_equal and LinkedIn (https://www.linkedin.com/company/equalai/?viewAsMember=true)]]>
      </description>
      <content:encoded>
        <![CDATA[Renée Cummings is a pioneering AI ethicist, Criminologist, Columbia University Community Scholar, and Founder of Urban AI. Her studies focus on the impact of AI on criminal justice, specifically in communities of color and incarcerated populations. In this episode, you will be inspired by Renee's insights on the impact that AI and data science has on our civil rights, how increasing diversity in AI is fundamental to creating technology that reflects our humanity, and improvements that still need to be made in areas such as trust and accountability. ----- Subscribe to catch each new episode on Apple (https://podcasts.apple.com/us/podcast/in-ai-we-trust/id1563248151), Spotify and all major platforms. To learn more about EqualAI, visit our website: https://www.equalai.org/ and follow us on Twitter: @ai_equal and LinkedIn (https://www.linkedin.com/company/equalai/?viewAsMember=true)]]>
      </content:encoded>
      <pubDate>Thu, 17 Feb 2022 17:48:28 -0500</pubDate>
      <author>Miriam Vogel</author>
      <enclosure url="https://media.transistor.fm/d24d1e30/18345d56.mp3" length="50345691" type="audio/mpeg"/>
      <itunes:author>Miriam Vogel</itunes:author>
      <itunes:duration>3142</itunes:duration>
      <itunes:summary>Renée Cummings is a pioneering AI ethicist, Criminologist, Columbia University Community Scholar, and Founder of Urban AI. Her studies focus on the impact of AI on criminal justice, specifically in communities of color and incarcerated populations. In this episode, you will be inspired by Renee's insights on the impact that AI and data science has on our civil rights, how increasing diversity in AI is fundamental to creating technology that reflects our humanity, and improvements that still need to be made in areas such as trust and accountability. ----- Subscribe to catch each new episode on Apple (https://podcasts.apple.com/us/podcast/in-ai-we-trust/id1563248151), Spotify and all major platforms. To learn more about EqualAI, visit our website: https://www.equalai.org/ and follow us on Twitter: @ai_equal and LinkedIn (https://www.linkedin.com/company/equalai/?viewAsMember=true)</itunes:summary>
      <itunes:subtitle>Renée Cummings is a pioneering AI ethicist, Criminologist, Columbia University Community Scholar, and Founder of Urban AI. Her studies focus on the impact of AI on criminal justice, specifically in communities of color and incarcerated populations. In thi</itunes:subtitle>
      <itunes:keywords>technology, AI Governance, AI, Tech, Artificial Intelligence, AI literacy, AI Policy</itunes:keywords>
      <itunes:explicit>No</itunes:explicit>
    </item>
    <item>
      <title>Marco Casalaina (Salesforce): Techno-Optimist not a Techno-Chauvinist</title>
      <itunes:episode>42</itunes:episode>
      <podcast:episode>42</podcast:episode>
      <itunes:title>Marco Casalaina (Salesforce): Techno-Optimist not a Techno-Chauvinist</itunes:title>
      <itunes:episodeType>full</itunes:episodeType>
      <guid isPermaLink="false">42ecaba8-5f11-4d42-b136-664af8049370</guid>
      <link>https://share.transistor.fm/s/890aa4f7</link>
      <description>
        <![CDATA[<p>In this episode, Marco<br>Casalaina, Salesforce’s SVP of Product Management and GM of Einstein, explains how his decades of experience in AI and tech has resulted in his techno-optimism, how an AI<br>ethicist enhances his work and why he encourages others to join the EqualAI badge program. He also shares his excitement about rapidly developing transforming models but illuminates how this technology will be the next gen ethical AI quandary.-----<br>Subscribe to catch each new episode on Apple (<a href="https://podcasts.apple.com/us/podcast/in-ai-we-trust/id1563248151">https://podcasts.apple.com/us/podcast/in-ai-we-trust/id1563248151</a>),<br>Spotify and all major platforms. To learn more about EqualAI, visit our website: <a href="https://www.equalai.org/">https://www.equalai.org/</a><br>and follow us on Twitter: @ai_equal and LinkedIn (<a href="https://www.linkedin.com/company/equalai/?viewAsMember=true">https://www.linkedin.com/company/equalai/?viewAsMember=true</a>).</p>]]>
      </description>
      <content:encoded>
        <![CDATA[<p>In this episode, Marco<br>Casalaina, Salesforce’s SVP of Product Management and GM of Einstein, explains how his decades of experience in AI and tech has resulted in his techno-optimism, how an AI<br>ethicist enhances his work and why he encourages others to join the EqualAI badge program. He also shares his excitement about rapidly developing transforming models but illuminates how this technology will be the next gen ethical AI quandary.-----<br>Subscribe to catch each new episode on Apple (<a href="https://podcasts.apple.com/us/podcast/in-ai-we-trust/id1563248151">https://podcasts.apple.com/us/podcast/in-ai-we-trust/id1563248151</a>),<br>Spotify and all major platforms. To learn more about EqualAI, visit our website: <a href="https://www.equalai.org/">https://www.equalai.org/</a><br>and follow us on Twitter: @ai_equal and LinkedIn (<a href="https://www.linkedin.com/company/equalai/?viewAsMember=true">https://www.linkedin.com/company/equalai/?viewAsMember=true</a>).</p>]]>
      </content:encoded>
      <pubDate>Fri, 11 Feb 2022 08:58:17 -0500</pubDate>
      <author>Miriam Vogel</author>
      <enclosure url="https://media.transistor.fm/890aa4f7/0d021b3c.mp3" length="40480930" type="audio/mpeg"/>
      <itunes:author>Miriam Vogel</itunes:author>
      <itunes:duration>2526</itunes:duration>
      <itunes:summary>In this episode, Marco
Casalaina, Salesforce’s SVP of Product Management and GM of Einstein, explains how his decades of experience in AI and tech has resulted in his techno-optimism, how an AI
ethicist enhances his work and why he encourages others to join the EqualAI badge program. He also shares his excitement about rapidly developing transforming models but illuminates how this technology will be the next gen ethical AI quandary.-----
Subscribe to catch each new episode on Apple (https://podcasts.apple.com/us/podcast/in-ai-we-trust/id1563248151),
Spotify and all major platforms. To learn more about EqualAI, visit our website: https://www.equalai.org/
and follow us on Twitter: @ai_equal and LinkedIn (https://www.linkedin.com/company/equalai/?viewAsMember=true).</itunes:summary>
      <itunes:subtitle>In this episode, Marco
Casalaina, Salesforce’s SVP of Product Management and GM of Einstein, explains how his decades of experience in AI and tech has resulted in his techno-optimism, how an AI
ethicist enhances his work and why he encourages others to </itunes:subtitle>
      <itunes:keywords>technology, AI Governance, AI, Tech, Artificial Intelligence, AI literacy, AI Policy</itunes:keywords>
      <itunes:explicit>No</itunes:explicit>
    </item>
    <item>
      <title>Mukesh Dalal, Stanley Black &amp; Decker (SBD): Why all companies will need a Chief AI Officer</title>
      <itunes:episode>41</itunes:episode>
      <podcast:episode>41</podcast:episode>
      <itunes:title>Mukesh Dalal, Stanley Black &amp; Decker (SBD): Why all companies will need a Chief AI Officer</itunes:title>
      <itunes:episodeType>full</itunes:episodeType>
      <guid isPermaLink="false">d0462c56-e905-4f50-9c7e-39c9ae046eb9</guid>
      <link>https://share.transistor.fm/s/e51ab0c0</link>
      <description>
        <![CDATA[<p>As the Chief AI officer at Stanley Black and Decker (SBD), Mukesh Dalal has helped transform a 178-year-old global manufacturing company’s approach to AI with the vision of delivering $1 billion of value to the company through AI and Analytics. In this week’s episode, Mukesh outlines SBD’s forward-thinking strategy on AI, describes SBD’s journey into the responsible AI space, and foresees that soon all major companies will have Chief AI Officers to harness the business potential and root out risks of AI technology. Subscribe to catch each new episode on Apple, Spotify and all major platforms. To learn more about EqualAI, visit our website: https://www.equalai.org/ and follow us on Twitter: @ai_equal.</p>]]>
      </description>
      <content:encoded>
        <![CDATA[<p>As the Chief AI officer at Stanley Black and Decker (SBD), Mukesh Dalal has helped transform a 178-year-old global manufacturing company’s approach to AI with the vision of delivering $1 billion of value to the company through AI and Analytics. In this week’s episode, Mukesh outlines SBD’s forward-thinking strategy on AI, describes SBD’s journey into the responsible AI space, and foresees that soon all major companies will have Chief AI Officers to harness the business potential and root out risks of AI technology. Subscribe to catch each new episode on Apple, Spotify and all major platforms. To learn more about EqualAI, visit our website: https://www.equalai.org/ and follow us on Twitter: @ai_equal.</p>]]>
      </content:encoded>
      <pubDate>Fri, 04 Feb 2022 12:30:52 -0500</pubDate>
      <author>Miriam Vogel</author>
      <enclosure url="https://media.transistor.fm/e51ab0c0/d84de756.mp3" length="31303788" type="audio/mpeg"/>
      <itunes:author>Miriam Vogel</itunes:author>
      <itunes:duration>1952</itunes:duration>
      <itunes:summary>As the Chief AI officer at Stanley Black and Decker (SBD), Mukesh Dalal has helped transform a 178-year-old global manufacturing company’s approach to AI with the vision of delivering $1 billion of value to the company through AI and Analytics. In this week’s episode, Mukesh outlines SBD’s forward-thinking strategy on AI, describes SBD’s journey into the responsible AI space, and foresees that soon all major companies will have Chief AI Officers to harness the business potential and root out risks of AI technology. Subscribe to catch each new episode on Apple, Spotify and all major platforms. To learn more about EqualAI, visit our website: https://www.equalai.org/ and follow us on Twitter: @ai_equal.</itunes:summary>
      <itunes:subtitle>As the Chief AI officer at Stanley Black and Decker (SBD), Mukesh Dalal has helped transform a 178-year-old global manufacturing company’s approach to AI with the vision of delivering $1 billion of value to the company through AI and Analytics. In this we</itunes:subtitle>
      <itunes:keywords>technology, AI Governance, AI, Tech, Artificial Intelligence, AI literacy, AI Policy</itunes:keywords>
      <itunes:explicit>No</itunes:explicit>
    </item>
    <item>
      <title>Meghna Sinha: Why data scientists are like medical professionals; why ignorance is not an option and steps we all must take when making data-based decisions</title>
      <itunes:episode>40</itunes:episode>
      <podcast:episode>40</podcast:episode>
      <itunes:title>Meghna Sinha: Why data scientists are like medical professionals; why ignorance is not an option and steps we all must take when making data-based decisions</itunes:title>
      <itunes:episodeType>full</itunes:episodeType>
      <guid isPermaLink="false">8eda5082-c460-49bf-9514-7a6f85210967</guid>
      <link>https://share.transistor.fm/s/816f92fe</link>
      <description>
        <![CDATA[Meghna Sinha is the Vice President of AI and Data at Verizon’s AI Center. Before joining Verizon, Meghna was Target’s VP of Data Sciences. In this episode, Meghna posits that data scientists are similar to medical practitioners, affirms that AI must start and end with humans, and shares lessons from the EqualAI Badge Program for Responsible AI Governance. Referenced papers/articles can be found here: WEF Toolkit (https://www3.weforum.org/docs/WEF_Empowering_AI_Leadership_2022.pdf) @VogelMiriam &amp; Robert Eccles article on AI as a necessary part of Board governance (https://corpgov.law.harvard.edu/2022/01/05/board-responsibility-for-artificial-intelligence-oversight/) Meghna’s article offering tips to women in tech: https://www.verizon.com/about/news/top-tips-women-tech 

 ----- Subscribe to catch each new episode on Apple (https://podcasts.apple.com/us/podcast/in-ai-we-trust/id1563248151), Spotify and all major platforms. To learn more about EqualAI, visit our website: https://www.equalai.org/ and follow us on Twitter: @ai_equal and LinkedIn (https://www.linkedin.com/company/equalai/?viewAsMember=true).]]>
      </description>
      <content:encoded>
        <![CDATA[Meghna Sinha is the Vice President of AI and Data at Verizon’s AI Center. Before joining Verizon, Meghna was Target’s VP of Data Sciences. In this episode, Meghna posits that data scientists are similar to medical practitioners, affirms that AI must start and end with humans, and shares lessons from the EqualAI Badge Program for Responsible AI Governance. Referenced papers/articles can be found here: WEF Toolkit (https://www3.weforum.org/docs/WEF_Empowering_AI_Leadership_2022.pdf) @VogelMiriam &amp; Robert Eccles article on AI as a necessary part of Board governance (https://corpgov.law.harvard.edu/2022/01/05/board-responsibility-for-artificial-intelligence-oversight/) Meghna’s article offering tips to women in tech: https://www.verizon.com/about/news/top-tips-women-tech 

 ----- Subscribe to catch each new episode on Apple (https://podcasts.apple.com/us/podcast/in-ai-we-trust/id1563248151), Spotify and all major platforms. To learn more about EqualAI, visit our website: https://www.equalai.org/ and follow us on Twitter: @ai_equal and LinkedIn (https://www.linkedin.com/company/equalai/?viewAsMember=true).]]>
      </content:encoded>
      <pubDate>Thu, 27 Jan 2022 20:09:07 -0500</pubDate>
      <author>Miriam Vogel</author>
      <enclosure url="https://media.transistor.fm/816f92fe/ce5e0beb.mp3" length="38026415" type="audio/mpeg"/>
      <itunes:author>Miriam Vogel</itunes:author>
      <itunes:duration>2371</itunes:duration>
      <itunes:summary>Meghna Sinha is the Vice President of AI and Data at Verizon’s AI Center. Before joining Verizon, Meghna was Target’s VP of Data Sciences. In this episode, Meghna posits that data scientists are similar to medical practitioners, affirms that AI must start and end with humans, and shares lessons from the EqualAI Badge Program for Responsible AI Governance. Referenced papers/articles can be found here: WEF Toolkit (https://www3.weforum.org/docs/WEF_Empowering_AI_Leadership_2022.pdf) @VogelMiriam &amp; Robert Eccles article on AI as a necessary part of Board governance (https://corpgov.law.harvard.edu/2022/01/05/board-responsibility-for-artificial-intelligence-oversight/) Meghna’s article offering tips to women in tech: https://www.verizon.com/about/news/top-tips-women-tech 

 ----- Subscribe to catch each new episode on Apple (https://podcasts.apple.com/us/podcast/in-ai-we-trust/id1563248151), Spotify and all major platforms. To learn more about EqualAI, visit our website: https://www.equalai.org/ and follow us on Twitter: @ai_equal and LinkedIn (https://www.linkedin.com/company/equalai/?viewAsMember=true).</itunes:summary>
      <itunes:subtitle>Meghna Sinha is the Vice President of AI and Data at Verizon’s AI Center. Before joining Verizon, Meghna was Target’s VP of Data Sciences. In this episode, Meghna posits that data scientists are similar to medical practitioners, affirms that AI must start</itunes:subtitle>
      <itunes:keywords>technology, AI Governance, AI, Tech, Artificial Intelligence, AI literacy, AI Policy</itunes:keywords>
      <itunes:explicit>No</itunes:explicit>
    </item>
    <item>
      <title>In AI we Trust welcomes its acclaimed new cohost, Kay Firth-Butterfield of the World Economic Forum</title>
      <itunes:episode>39</itunes:episode>
      <podcast:episode>39</podcast:episode>
      <itunes:title>In AI we Trust welcomes its acclaimed new cohost, Kay Firth-Butterfield of the World Economic Forum</itunes:title>
      <itunes:episodeType>full</itunes:episodeType>
      <guid isPermaLink="false">3edb95b4-a4ec-406d-bf36-edf5997a673d</guid>
      <link>https://share.transistor.fm/s/2575c0ab</link>
      <description>
        <![CDATA[Kay Firth-Butterfield is a leader in AI governance. Her deep and wide-ranging experience as an entrepreneur, barrister, judge, and now as Head of Artificial Intelligence and Machine Learning and member of the Executive Committee at the World Economic Forum, has established Kay as an internationally recognized expert on the subject. Her numerous titles and awards include being featured in the New York Times as one of 10 Women Changing the Landscape of Leadership. In this episode, Miriam Vogel interviews her co-host, Kay Firth-Butterfield on her long-time commitment to exploring how humanity can equitably benefit from new technologies. ]]>
      </description>
      <content:encoded>
        <![CDATA[Kay Firth-Butterfield is a leader in AI governance. Her deep and wide-ranging experience as an entrepreneur, barrister, judge, and now as Head of Artificial Intelligence and Machine Learning and member of the Executive Committee at the World Economic Forum, has established Kay as an internationally recognized expert on the subject. Her numerous titles and awards include being featured in the New York Times as one of 10 Women Changing the Landscape of Leadership. In this episode, Miriam Vogel interviews her co-host, Kay Firth-Butterfield on her long-time commitment to exploring how humanity can equitably benefit from new technologies. ]]>
      </content:encoded>
      <pubDate>Tue, 18 Jan 2022 10:49:58 -0500</pubDate>
      <author>Miriam Vogel</author>
      <enclosure url="https://media.transistor.fm/2575c0ab/6391e19b.mp3" length="36741783" type="audio/mpeg"/>
      <itunes:author>Miriam Vogel</itunes:author>
      <itunes:duration>2292</itunes:duration>
      <itunes:summary>Kay Firth-Butterfield is a leader in AI governance. Her deep and wide-ranging experience as an entrepreneur, barrister, judge, and now as Head of Artificial Intelligence and Machine Learning and member of the Executive Committee at the World Economic Forum, has established Kay as an internationally recognized expert on the subject. Her numerous titles and awards include being featured in the New York Times as one of 10 Women Changing the Landscape of Leadership. In this episode, Miriam Vogel interviews her co-host, Kay Firth-Butterfield on her long-time commitment to exploring how humanity can equitably benefit from new technologies. </itunes:summary>
      <itunes:subtitle>Kay Firth-Butterfield is a leader in AI governance. Her deep and wide-ranging experience as an entrepreneur, barrister, judge, and now as Head of Artificial Intelligence and Machine Learning and member of the Executive Committee at the World Economic Foru</itunes:subtitle>
      <itunes:keywords>technology, AI Governance, AI, Tech, Artificial Intelligence, AI literacy, AI Policy</itunes:keywords>
      <itunes:explicit>No</itunes:explicit>
    </item>
    <item>
      <title>A year in review: the path toward responsible AI in 2021 (and farewell to Mark episode)</title>
      <itunes:episode>38</itunes:episode>
      <podcast:episode>38</podcast:episode>
      <itunes:title>A year in review: the path toward responsible AI in 2021 (and farewell to Mark episode)</itunes:title>
      <itunes:episodeType>full</itunes:episodeType>
      <guid isPermaLink="false">3b9de61a-5fd6-43bf-822a-60d47ef6b35a</guid>
      <link>https://share.transistor.fm/s/f29ddd72</link>
      <description>
        <![CDATA[<p>In this episode, cohosts Miriam Vogel and Mark Caine share the conversations and highlights that have inspired them in 2021 and predict what we can expect to see in this space in 2022. We also bid farewell to Mark as he departs the World Economic Forum and takes on new adventures.</p>]]>
      </description>
      <content:encoded>
        <![CDATA[<p>In this episode, cohosts Miriam Vogel and Mark Caine share the conversations and highlights that have inspired them in 2021 and predict what we can expect to see in this space in 2022. We also bid farewell to Mark as he departs the World Economic Forum and takes on new adventures.</p>]]>
      </content:encoded>
      <pubDate>Fri, 07 Jan 2022 11:04:32 -0500</pubDate>
      <author>Miriam Vogel</author>
      <enclosure url="https://media.transistor.fm/f29ddd72/692e1935.mp3" length="23407748" type="audio/mpeg"/>
      <itunes:author>Miriam Vogel</itunes:author>
      <itunes:duration>1459</itunes:duration>
      <itunes:summary>In this episode, cohosts Miriam Vogel and Mark Caine share the conversations and highlights that have inspired them in 2021 and predict what we can expect to see in this space in 2022. We also bid farewell to Mark as he departs the World Economic Forum and takes on new adventures.</itunes:summary>
      <itunes:subtitle>In this episode, cohosts Miriam Vogel and Mark Caine share the conversations and highlights that have inspired them in 2021 and predict what we can expect to see in this space in 2022. We also bid farewell to Mark as he departs the World Economic Forum an</itunes:subtitle>
      <itunes:keywords>technology, AI Governance, AI, Tech, Artificial Intelligence, AI literacy, AI Policy</itunes:keywords>
      <itunes:explicit>No</itunes:explicit>
    </item>
    <item>
      <title>Amy Holcroft: How HPE is "living it in action" and how the EqualAI Badge program, in collaboration with WEF, has helped this effort</title>
      <itunes:episode>36</itunes:episode>
      <podcast:episode>36</podcast:episode>
      <itunes:title>Amy Holcroft: How HPE is "living it in action" and how the EqualAI Badge program, in collaboration with WEF, has helped this effort</itunes:title>
      <itunes:episodeType>full</itunes:episodeType>
      <guid isPermaLink="false">87579444-e65d-4155-b665-ee9ac0cfbef1</guid>
      <link>https://share.transistor.fm/s/69152437</link>
      <description>
        <![CDATA[Amy Holcroft is the Chief Privacy Officer and VP of Privacy & Info Governance at Hewlett Packard Enterprise (HPE). In this episode, Amy shares how she co-leads the establishment of HPE’s AI Ethics Advisory board and HPE’s AI Ethical Principles. She shares that her work requires resilience and thoughtful governance, and how participation in the EqualAI Badge Program on Responsible AI Governance, in collaboration with the World Economic Forum, supports her and her work at HPE, including a timely session with Cathy O'Neil that she put to good use immediately.

-----

Subscribe to catch each new episode on Apple, Spotify and all major platforms. 
To learn more about EqualAI, visit our website: https://www.equalai.org/ and follow us on Twitter: @ai_equal]]>
      </description>
      <content:encoded>
        <![CDATA[Amy Holcroft is the Chief Privacy Officer and VP of Privacy & Info Governance at Hewlett Packard Enterprise (HPE). In this episode, Amy shares how she co-leads the establishment of HPE’s AI Ethics Advisory board and HPE’s AI Ethical Principles. She shares that her work requires resilience and thoughtful governance, and how participation in the EqualAI Badge Program on Responsible AI Governance, in collaboration with the World Economic Forum, supports her and her work at HPE, including a timely session with Cathy O'Neil that she put to good use immediately.

-----

Subscribe to catch each new episode on Apple, Spotify and all major platforms. 
To learn more about EqualAI, visit our website: https://www.equalai.org/ and follow us on Twitter: @ai_equal]]>
      </content:encoded>
      <pubDate>Thu, 09 Dec 2021 15:27:24 -0500</pubDate>
      <author>Miriam Vogel</author>
      <enclosure url="https://media.transistor.fm/69152437/787e3f70.mp3" length="29555123" type="audio/mpeg"/>
      <itunes:author>Miriam Vogel</itunes:author>
      <itunes:duration>1840</itunes:duration>
      <itunes:summary>Amy Holcroft is the Chief Privacy Officer and VP of Privacy &amp; Info Governance at Hewlett Packard Enterprise (HPE). In this episode, Amy shares how she co-leads the establishment of HPE’s AI Ethics Advisory board and HPE’s AI Ethical Principles. She shares that her work requires resilience and thoughtful governance, and how participation in the EqualAI Badge Program on Responsible AI Governance, in collaboration with the World Economic Forum, supports her and her work at HPE, including a timely session with Cathy O'Neil that she put to good use immediately.

-----

Subscribe to catch each new episode on Apple, Spotify and all major platforms. 
To learn more about EqualAI, visit our website: https://www.equalai.org/ and follow us on Twitter: @ai_equal</itunes:summary>
      <itunes:subtitle>Amy Holcroft is the Chief Privacy Officer and VP of Privacy &amp; Info Governance at Hewlett Packard Enterprise (HPE). In this episode, Amy shares how she co-leads the establishment of HPE’s AI Ethics Advisory board and HPE’s AI Ethical Principles. She sh</itunes:subtitle>
      <itunes:keywords>technology, AI Governance, AI, Tech, Artificial Intelligence, AI literacy, AI Policy</itunes:keywords>
      <itunes:explicit>No</itunes:explicit>
    </item>
    <item>
      <title>Seth Dobrin: How do you establish a human-centered approach to data and AI (and why is this necessary to succeed)?</title>
      <itunes:episode>35</itunes:episode>
      <podcast:episode>35</podcast:episode>
      <itunes:title>Seth Dobrin: How do you establish a human-centered approach to data and AI (and why is this necessary to succeed)?</itunes:title>
      <itunes:episodeType>full</itunes:episodeType>
      <guid isPermaLink="false">0e6c6528-4074-40e9-b7db-956fd46639ef</guid>
      <link>https://share.transistor.fm/s/88c65d63</link>
      <description>
        <![CDATA[Seth Dobrin is the Global Chief AI Officer of IBM. Seth has spent his career scaling and using existing technologies to address previously intractable problems at scale. In this episode, Seth shares concrete steps he has taken to create a more diverse and trust-based workplace, explains how his PhD in genetics is relevant and helpful to his current work in AI, and breaks down the what, why and how of a human-centered approach to AI.

-----

Subscribe to catch each new episode on Apple, Spotify and all major platforms. To learn more about EqualAI, visit our website: https://www.equalai.org/ and follow us on Twitter: @ai_equal]]>
      </description>
      <content:encoded>
        <![CDATA[Seth Dobrin is the Global Chief AI Officer of IBM. Seth has spent his career scaling and using existing technologies to address previously intractable problems at scale. In this episode, Seth shares concrete steps he has taken to create a more diverse and trust-based workplace, explains how his PhD in genetics is relevant and helpful to his current work in AI, and breaks down the what, why and how of a human-centered approach to AI.

-----

Subscribe to catch each new episode on Apple, Spotify and all major platforms. To learn more about EqualAI, visit our website: https://www.equalai.org/ and follow us on Twitter: @ai_equal]]>
      </content:encoded>
      <pubDate>Thu, 11 Nov 2021 10:11:06 -0500</pubDate>
      <author>Miriam Vogel</author>
      <enclosure url="https://media.transistor.fm/88c65d63/595fcdc8.mp3" length="46003811" type="audio/mpeg"/>
      <itunes:author>Miriam Vogel</itunes:author>
      <itunes:duration>2868</itunes:duration>
      <itunes:summary>Seth Dobrin is the Global Chief AI Officer of IBM. Seth has spent his career scaling and using existing technologies to address previously intractable problems at scale. In this episode, Seth shares concrete steps he has taken to create a more diverse and trust-based workplace, explains how his PhD in genetics is relevant and helpful to his current work in AI, and breaks down the what, why and how of a human-centered approach to AI.

-----

Subscribe to catch each new episode on Apple, Spotify and all major platforms. To learn more about EqualAI, visit our website: https://www.equalai.org/ and follow us on Twitter: @ai_equal</itunes:summary>
      <itunes:subtitle>Seth Dobrin is the Global Chief AI Officer of IBM. Seth has spent his career scaling and using existing technologies to address previously intractable problems at scale. In this episode, Seth shares concrete steps he has taken to create a more diverse and</itunes:subtitle>
      <itunes:keywords>technology, AI Governance, AI, Tech, Artificial Intelligence, AI literacy, AI Policy</itunes:keywords>
      <itunes:explicit>No</itunes:explicit>
    </item>
    <item>
      <title>Kat Zhou: What is the role of design in creating inclusive and equitable AI?</title>
      <itunes:episode>34</itunes:episode>
      <podcast:episode>34</podcast:episode>
      <itunes:title>Kat Zhou: What is the role of design in creating inclusive and equitable AI?</itunes:title>
      <itunes:episodeType>full</itunes:episodeType>
      <guid isPermaLink="false">7b8408f2-a4e5-4065-b179-566c0b4de3ec</guid>
      <link>https://share.transistor.fm/s/4458053f</link>
      <description>
        <![CDATA[Kat Zhou is a product designer focusing on integrating ethics into the design of AI systems. She is a leading voice for more inclusive and privacy-respecting approaches to AI, and she has called for greater regulation of AI and more human-centric business models for AI companies. In this episode, we ask Kat how governments, product designers, and corporate decision makers can minimize the harms of AI products – and whether there are any products that should never be developed to begin with. 

-----

To learn more about EqualAI, visit our website: https://www.equalai.org/
You can also follow us on Twitter: @ai_equal]]>
      </description>
      <content:encoded>
        <![CDATA[Kat Zhou is a product designer focusing on integrating ethics into the design of AI systems. She is a leading voice for more inclusive and privacy-respecting approaches to AI, and she has called for greater regulation of AI and more human-centric business models for AI companies. In this episode, we ask Kat how governments, product designers, and corporate decision makers can minimize the harms of AI products – and whether there are any products that should never be developed to begin with. 

-----

To learn more about EqualAI, visit our website: https://www.equalai.org/
You can also follow us on Twitter: @ai_equal]]>
      </content:encoded>
      <pubDate>Wed, 03 Nov 2021 10:13:20 -0400</pubDate>
      <author>Miriam Vogel</author>
      <enclosure url="https://media.transistor.fm/4458053f/aca95b3f.mp3" length="32984770" type="audio/mpeg"/>
      <itunes:author>Miriam Vogel</itunes:author>
      <itunes:duration>2054</itunes:duration>
      <itunes:summary>Kat Zhou is a product designer focusing on integrating ethics into the design of AI systems. She is a leading voice for more inclusive and privacy-respecting approaches to AI, and she has called for greater regulation of AI and more human-centric business models for AI companies. In this episode, we ask Kat how governments, product designers, and corporate decision makers can minimize the harms of AI products – and whether there are any products that should never be developed to begin with. 

-----

To learn more about EqualAI, visit our website: https://www.equalai.org/
You can also follow us on Twitter: @ai_equal</itunes:summary>
      <itunes:subtitle>Kat Zhou is a product designer focusing on integrating ethics into the design of AI systems. She is a leading voice for more inclusive and privacy-respecting approaches to AI, and she has called for greater regulation of AI and more human-centric business</itunes:subtitle>
      <itunes:keywords>technology, AI Governance, AI, Tech, Artificial Intelligence, AI literacy, AI Policy</itunes:keywords>
      <itunes:explicit>No</itunes:explicit>
    </item>
    <item>
      <title>Alex Kotran: Who needs AI literacy and how can we accelerate it? </title>
      <itunes:episode>33</itunes:episode>
      <podcast:episode>33</podcast:episode>
      <itunes:title>Alex Kotran: Who needs AI literacy and how can we accelerate it? </itunes:title>
      <itunes:episodeType>full</itunes:episodeType>
      <guid isPermaLink="false">a58d2d8f-a22b-498d-9bd4-0e238fa2e67c</guid>
      <link>https://share.transistor.fm/s/8af6f910</link>
      <description>
        <![CDATA[Alex Kotran is the co-founder and CEO of the AI Education Project, a non-profit that brings AI-related knowledge and skills to communities that are being impacted by AI and automation. In this episode, Alex highlights how the communities that are most impacted by AI are often the ones with the least access to basic AI knowledge, and how this is creating disparities in access to healthcare, financial services, criminal justice, and more. 

-----

To learn more about EqualAI, visit our website: https://www.equalai.org/
You can also follow us on Twitter: @ai_equal]]>
      </description>
      <content:encoded>
        <![CDATA[Alex Kotran is the co-founder and CEO of the AI Education Project, a non-profit that brings AI-related knowledge and skills to communities that are being impacted by AI and automation. In this episode, Alex highlights how the communities that are most impacted by AI are often the ones with the least access to basic AI knowledge, and how this is creating disparities in access to healthcare, financial services, criminal justice, and more. 

-----

To learn more about EqualAI, visit our website: https://www.equalai.org/
You can also follow us on Twitter: @ai_equal]]>
      </content:encoded>
      <pubDate>Thu, 28 Oct 2021 11:08:02 -0400</pubDate>
      <author>Miriam Vogel</author>
      <enclosure url="https://media.transistor.fm/8af6f910/11003852.mp3" length="27239568" type="audio/mpeg"/>
      <itunes:author>Miriam Vogel</itunes:author>
      <itunes:duration>1691</itunes:duration>
      <itunes:summary>Alex Kotran is the co-founder and CEO of the AI Education Project, a non-profit that brings AI-related knowledge and skills to communities that are being impacted by AI and automation. In this episode, Alex highlights how the communities that are most impacted by AI are often the ones with the least access to basic AI knowledge, and how this is creating disparities in access to healthcare, financial services, criminal justice, and more. 

-----

To learn more about EqualAI, visit our website: https://www.equalai.org/
You can also follow us on Twitter: @ai_equal</itunes:summary>
      <itunes:subtitle>Alex Kotran is the co-founder and CEO of the AI Education Project, a non-profit that brings AI-related knowledge and skills to communities that are being impacted by AI and automation. In this episode, Alex highlights how the communities that are most imp</itunes:subtitle>
      <itunes:keywords>technology, AI Governance, AI, Tech, Artificial Intelligence, AI literacy, AI Policy</itunes:keywords>
      <itunes:explicit>No</itunes:explicit>
    </item>
    <item>
      <title>Meg King of the Wilson Center: Who is ensuring policy makers are able to "speak AI"?</title>
      <itunes:episode>32</itunes:episode>
      <podcast:episode>32</podcast:episode>
      <itunes:title>Meg King of the Wilson Center: Who is ensuring policy makers are able to "speak AI"?</itunes:title>
      <itunes:episodeType>full</itunes:episodeType>
      <guid isPermaLink="false">a35a6993-2c03-4fbf-b2da-b8d9af1822bc</guid>
      <link>https://share.transistor.fm/s/81a6f069</link>
      <description>
        <![CDATA[Meg King is the Director of the Science and Technology Innovation Program at the Wilson Center; a non-partisan think tank created by Congress. She leads innovative transnational projects examining the development of emerging technology and related policy opportunities.
Her program also provides training seminars for Congressional and Executive branch staff to develop technology knowledge and skills. In
this episode, Meg shares context on her recent congressional testimony, the goals of her work at the Wilson Center and lessons we can learn from AI frameworks and policies abroad.

-----

To learn more about EqualAI, visit our website: https://www.equalai.org/
You can also follow us on Twitter: @ai_equal]]>
      </description>
      <content:encoded>
        <![CDATA[Meg King is the Director of the Science and Technology Innovation Program at the Wilson Center; a non-partisan think tank created by Congress. She leads innovative transnational projects examining the development of emerging technology and related policy opportunities.
Her program also provides training seminars for Congressional and Executive branch staff to develop technology knowledge and skills. In
this episode, Meg shares context on her recent congressional testimony, the goals of her work at the Wilson Center and lessons we can learn from AI frameworks and policies abroad.

-----

To learn more about EqualAI, visit our website: https://www.equalai.org/
You can also follow us on Twitter: @ai_equal]]>
      </content:encoded>
      <pubDate>Fri, 22 Oct 2021 14:34:46 -0400</pubDate>
      <author>Miriam Vogel</author>
      <enclosure url="https://media.transistor.fm/81a6f069/cf85daf3.mp3" length="29573530" type="audio/mpeg"/>
      <itunes:author>Miriam Vogel</itunes:author>
      <itunes:duration>1840</itunes:duration>
      <itunes:summary>Meg King is the Director of the Science and Technology Innovation Program at the Wilson Center; a non-partisan think tank created by Congress. She leads innovative transnational projects examining the development of emerging technology and related policy opportunities.
Her program also provides training seminars for Congressional and Executive branch staff to develop technology knowledge and skills. In
this episode, Meg shares context on her recent congressional testimony, the goals of her work at the Wilson Center and lessons we can learn from AI frameworks and policies abroad.

-----

To learn more about EqualAI, visit our website: https://www.equalai.org/
You can also follow us on Twitter: @ai_equal</itunes:summary>
      <itunes:subtitle>Meg King is the Director of the Science and Technology Innovation Program at the Wilson Center; a non-partisan think tank created by Congress. She leads innovative transnational projects examining the development of emerging technology and related policy </itunes:subtitle>
      <itunes:keywords>technology, AI Governance, AI, Tech, Artificial Intelligence, AI literacy, AI Policy</itunes:keywords>
      <itunes:explicit>No</itunes:explicit>
    </item>
    <item>
      <title>David Hardoon: Can AI be ethical?</title>
      <itunes:episode>31</itunes:episode>
      <podcast:episode>31</podcast:episode>
      <itunes:title>David Hardoon: Can AI be ethical?</itunes:title>
      <itunes:episodeType>full</itunes:episodeType>
      <guid isPermaLink="false">46a60319-6ddd-4b25-ab09-169d54a66b30</guid>
      <link>https://share.transistor.fm/s/88e3c97e</link>
      <description>
        <![CDATA[In this episode, we speak with David Hardoon, a self-proclaimed "data artist." He leads Data and AI efforts at UnionBank Philippines and serves as an external advisor to Singapore's Corrupt Investigation Practices Bureau (CPIB). David has extensive experience in both industry and academia with a PhD in Computer Science and B.Sc. in Computer Science and AI. He weighs in on both the high level concepts surrounding ethics and AI and offers practical steps he uses to support ethical governance with the AI systems under his purview. 

-----

To learn more about EqualAI, visit our website: https://www.equalai.org/
You can also follow us on Twitter: @ai_equal]]>
      </description>
      <content:encoded>
        <![CDATA[In this episode, we speak with David Hardoon, a self-proclaimed "data artist." He leads Data and AI efforts at UnionBank Philippines and serves as an external advisor to Singapore's Corrupt Investigation Practices Bureau (CPIB). David has extensive experience in both industry and academia with a PhD in Computer Science and B.Sc. in Computer Science and AI. He weighs in on both the high level concepts surrounding ethics and AI and offers practical steps he uses to support ethical governance with the AI systems under his purview. 

-----

To learn more about EqualAI, visit our website: https://www.equalai.org/
You can also follow us on Twitter: @ai_equal]]>
      </content:encoded>
      <pubDate>Thu, 14 Oct 2021 16:15:03 -0400</pubDate>
      <author>Miriam Vogel</author>
      <enclosure url="https://media.transistor.fm/88e3c97e/65446526.mp3" length="44808531" type="audio/mpeg"/>
      <itunes:author>Miriam Vogel</itunes:author>
      <itunes:duration>2793</itunes:duration>
      <itunes:summary>In this episode, we speak with David Hardoon, a self-proclaimed "data artist." He leads Data and AI efforts at UnionBank Philippines and serves as an external advisor to Singapore's Corrupt Investigation Practices Bureau (CPIB). David has extensive experience in both industry and academia with a PhD in Computer Science and B.Sc. in Computer Science and AI. He weighs in on both the high level concepts surrounding ethics and AI and offers practical steps he uses to support ethical governance with the AI systems under his purview. 

-----

To learn more about EqualAI, visit our website: https://www.equalai.org/
You can also follow us on Twitter: @ai_equal</itunes:summary>
      <itunes:subtitle>In this episode, we speak with David Hardoon, a self-proclaimed "data artist." He leads Data and AI efforts at UnionBank Philippines and serves as an external advisor to Singapore's Corrupt Investigation Practices Bureau (CPIB). David has extensive experi</itunes:subtitle>
      <itunes:keywords>technology, AI Governance, AI, Tech, Artificial Intelligence, AI literacy, AI Policy</itunes:keywords>
      <itunes:explicit>No</itunes:explicit>
    </item>
    <item>
      <title>Rep. Yvette Clarke: Why is AI regulation necessary during this time of racial reckoning? </title>
      <itunes:episode>30</itunes:episode>
      <podcast:episode>30</podcast:episode>
      <itunes:title>Rep. Yvette Clarke: Why is AI regulation necessary during this time of racial reckoning? </itunes:title>
      <itunes:episodeType>full</itunes:episodeType>
      <guid isPermaLink="false">b8d39e6f-e51c-43e9-a14a-e2f3e59cb6c5</guid>
      <link>https://share.transistor.fm/s/5508dd7f</link>
      <description>
        <![CDATA[Find out on this week's episode with special guest Congresswoman Yvette Clarke (NY-9th) why she makes AI a top priority in her work to protect vulnerable populations.

-----

To learn more about EqualAI, visit our website: https://www.equalai.org/
You can also follow us on Twitter: @ai_equal]]>
      </description>
      <content:encoded>
        <![CDATA[Find out on this week's episode with special guest Congresswoman Yvette Clarke (NY-9th) why she makes AI a top priority in her work to protect vulnerable populations.

-----

To learn more about EqualAI, visit our website: https://www.equalai.org/
You can also follow us on Twitter: @ai_equal]]>
      </content:encoded>
      <pubDate>Wed, 06 Oct 2021 10:08:16 -0400</pubDate>
      <author>Miriam Vogel</author>
      <enclosure url="https://media.transistor.fm/5508dd7f/a38d0eee.mp3" length="32033439" type="audio/mpeg"/>
      <itunes:author>Miriam Vogel</itunes:author>
      <itunes:duration>1996</itunes:duration>
      <itunes:summary>Find out on this week's episode with special guest Congresswoman Yvette Clarke (NY-9th) why she makes AI a top priority in her work to protect vulnerable populations.

-----

To learn more about EqualAI, visit our website: https://www.equalai.org/
You can also follow us on Twitter: @ai_equal</itunes:summary>
      <itunes:subtitle>Find out on this week's episode with special guest Congresswoman Yvette Clarke (NY-9th) why she makes AI a top priority in her work to protect vulnerable populations.

-----

To learn more about EqualAI, visit our website: https://www.equalai.org/
Yo</itunes:subtitle>
      <itunes:keywords>technology, AI Governance, AI, Tech, Artificial Intelligence, AI literacy, AI Policy</itunes:keywords>
      <itunes:explicit>No</itunes:explicit>
    </item>
    <item>
      <title>Elham Tabassi of NIST: Who ensures the U.S. has strong metrics, tools, &amp; standards for responsible AI?</title>
      <itunes:episode>29</itunes:episode>
      <podcast:episode>29</podcast:episode>
      <itunes:title>Elham Tabassi of NIST: Who ensures the U.S. has strong metrics, tools, &amp; standards for responsible AI?</itunes:title>
      <itunes:episodeType>full</itunes:episodeType>
      <guid isPermaLink="false">6d515ede-9504-44ed-a3df-750abdcabab7</guid>
      <link>https://share.transistor.fm/s/a2f98e61</link>
      <description>
        <![CDATA[Observers have been skeptical about the ability of the US to lead in AI and establish the necessary framework to ensure its safe and effective development. NIST – the National Institute of Standards and Technology – is responding to that call. In this episode, we speak with Elham Tabassi who is leading NIST's work to support safe and effective Artificial Intelligence. Elham is the Chief of Staff in the Information Technology Laboratory (ITL) and serves on the National AI Research Resource Task Force, announced by the White House and the National Science Foundation (NSF) last June.  Learn what makes NIST's 'secret sauce' for impactful work (spoiler: it involves you) and participate in the discussion through upcoming workshops and listening sessions: https://www.nist.gov/itl/ai-risk-management-framework/ai-rmf-development-request-information

-----

To learn more about EqualAI, visit our website: https://www.equalai.org/
You can also follow us on Twitter: @ai_equal]]>
      </description>
      <content:encoded>
        <![CDATA[Observers have been skeptical about the ability of the US to lead in AI and establish the necessary framework to ensure its safe and effective development. NIST – the National Institute of Standards and Technology – is responding to that call. In this episode, we speak with Elham Tabassi who is leading NIST's work to support safe and effective Artificial Intelligence. Elham is the Chief of Staff in the Information Technology Laboratory (ITL) and serves on the National AI Research Resource Task Force, announced by the White House and the National Science Foundation (NSF) last June.  Learn what makes NIST's 'secret sauce' for impactful work (spoiler: it involves you) and participate in the discussion through upcoming workshops and listening sessions: https://www.nist.gov/itl/ai-risk-management-framework/ai-rmf-development-request-information

-----

To learn more about EqualAI, visit our website: https://www.equalai.org/
You can also follow us on Twitter: @ai_equal]]>
      </content:encoded>
      <pubDate>Tue, 28 Sep 2021 18:12:54 -0400</pubDate>
      <author>Miriam Vogel</author>
      <enclosure url="https://media.transistor.fm/a2f98e61/d459b7f6.mp3" length="46853815" type="audio/mpeg"/>
      <itunes:author>Miriam Vogel</itunes:author>
      <itunes:duration>2921</itunes:duration>
      <itunes:summary>Observers have been skeptical about the ability of the US to lead in AI and establish the necessary framework to ensure its safe and effective development. NIST – the National Institute of Standards and Technology – is responding to that call. In this episode, we speak with Elham Tabassi who is leading NIST's work to support safe and effective Artificial Intelligence. Elham is the Chief of Staff in the Information Technology Laboratory (ITL) and serves on the National AI Research Resource Task Force, announced by the White House and the National Science Foundation (NSF) last June.  Learn what makes NIST's 'secret sauce' for impactful work (spoiler: it involves you) and participate in the discussion through upcoming workshops and listening sessions: https://www.nist.gov/itl/ai-risk-management-framework/ai-rmf-development-request-information

-----

To learn more about EqualAI, visit our website: https://www.equalai.org/
You can also follow us on Twitter: @ai_equal</itunes:summary>
      <itunes:subtitle>Observers have been skeptical about the ability of the US to lead in AI and establish the necessary framework to ensure its safe and effective development. NIST – the National Institute of Standards and Technology – is responding to that call. In this epi</itunes:subtitle>
      <itunes:keywords>technology, AI Governance, AI, Tech, Artificial Intelligence, AI literacy, AI Policy</itunes:keywords>
      <itunes:explicit>No</itunes:explicit>
    </item>
    <item>
      <title>Taka Ariga and Stephen Sanford: What is the U.S. GAO's AI Framework?</title>
      <itunes:episode>28</itunes:episode>
      <podcast:episode>28</podcast:episode>
      <itunes:title>Taka Ariga and Stephen Sanford: What is the U.S. GAO's AI Framework?</itunes:title>
      <itunes:episodeType>full</itunes:episodeType>
      <guid isPermaLink="false">dc72adad-30e9-412f-873e-475b3a6075ea</guid>
      <link>https://share.transistor.fm/s/debd80f3</link>
      <description>
        <![CDATA[Taka Ariga is the first Chief Data Scientist and Director of the Innovation Lab at the U.S. Government Accountability Office (GAO). Stephen Sanford is the Managing Director in GAO’s Strategic Planning and External Liaison team. Taka and Stephen are the authors of the GAO's recently released AI Framework, one of the first resources provided by the U.S. government to help identify best practices and the principles to deploy, monitor and evaluate AI responsibly. In this episode, we ask the AI Framework authors why they took on this initiative and lessons learned that are broadly applicable across industry.

-----

To learn more about EqualAI, visit our website: https://www.equalai.org/
You can also follow us on Twitter: @ai_equal]]>
      </description>
      <content:encoded>
        <![CDATA[Taka Ariga is the first Chief Data Scientist and Director of the Innovation Lab at the U.S. Government Accountability Office (GAO). Stephen Sanford is the Managing Director in GAO’s Strategic Planning and External Liaison team. Taka and Stephen are the authors of the GAO's recently released AI Framework, one of the first resources provided by the U.S. government to help identify best practices and the principles to deploy, monitor and evaluate AI responsibly. In this episode, we ask the AI Framework authors why they took on this initiative and lessons learned that are broadly applicable across industry.

-----

To learn more about EqualAI, visit our website: https://www.equalai.org/
You can also follow us on Twitter: @ai_equal]]>
      </content:encoded>
      <pubDate>Thu, 23 Sep 2021 15:42:23 -0400</pubDate>
      <author>Miriam Vogel</author>
      <enclosure url="https://media.transistor.fm/debd80f3/fcee1295.mp3" length="41491325" type="audio/mpeg"/>
      <itunes:author>Miriam Vogel</itunes:author>
      <itunes:duration>2588</itunes:duration>
      <itunes:summary>Taka Ariga is the first Chief Data Scientist and Director of the Innovation Lab at the U.S. Government Accountability Office (GAO). Stephen Sanford is the Managing Director in GAO’s Strategic Planning and External Liaison team. Taka and Stephen are the authors of the GAO's recently released AI Framework, one of the first resources provided by the U.S. government to help identify best practices and the principles to deploy, monitor and evaluate AI responsibly. In this episode, we ask the AI Framework authors why they took on this initiative and lessons learned that are broadly applicable across industry.

-----

To learn more about EqualAI, visit our website: https://www.equalai.org/
You can also follow us on Twitter: @ai_equal</itunes:summary>
      <itunes:subtitle>Taka Ariga is the first Chief Data Scientist and Director of the Innovation Lab at the U.S. Government Accountability Office (GAO). Stephen Sanford is the Managing Director in GAO’s Strategic Planning and External Liaison team. Taka and Stephen are the au</itunes:subtitle>
      <itunes:keywords>technology, AI Governance, AI, Tech, Artificial Intelligence, AI literacy, AI Policy</itunes:keywords>
      <itunes:explicit>No</itunes:explicit>
    </item>
    <item>
      <title>Vilas Dhar: How can civil society shape a positive, human-centric future for AI?</title>
      <itunes:episode>27</itunes:episode>
      <podcast:episode>27</podcast:episode>
      <itunes:title>Vilas Dhar: How can civil society shape a positive, human-centric future for AI?</itunes:title>
      <itunes:episodeType>full</itunes:episodeType>
      <guid isPermaLink="false">5e5c6483-b2b9-40b0-b156-0cacc728f7d9</guid>
      <link>https://share.transistor.fm/s/97c30955</link>
      <description>
        <![CDATA[Vilas Dhar is a technologist, lawyer, and human rights advocate championing a new social compact for the digital age. As President and Trustee of the Patrick J. McGovern Foundation, he is a global leader in advancing artificial intelligence and data solutions to create a thriving, equitable, and sustainable future for all. In this episode we ask Vilas how he arrived at the intersection of AI and philanthropy, and how he thinks philanthropists and civil society can shape a more inclusive and societally beneficial future for AI. 

-----

To learn more about EqualAI, visit our website: https://www.equalai.org/
You can also follow us on Twitter: @ai_equal]]>
      </description>
      <content:encoded>
        <![CDATA[Vilas Dhar is a technologist, lawyer, and human rights advocate championing a new social compact for the digital age. As President and Trustee of the Patrick J. McGovern Foundation, he is a global leader in advancing artificial intelligence and data solutions to create a thriving, equitable, and sustainable future for all. In this episode we ask Vilas how he arrived at the intersection of AI and philanthropy, and how he thinks philanthropists and civil society can shape a more inclusive and societally beneficial future for AI. 

-----

To learn more about EqualAI, visit our website: https://www.equalai.org/
You can also follow us on Twitter: @ai_equal]]>
      </content:encoded>
      <pubDate>Thu, 16 Sep 2021 11:59:00 -0400</pubDate>
      <author>Miriam Vogel</author>
      <enclosure url="https://media.transistor.fm/97c30955/11fd326a.mp3" length="38990916" type="audio/mpeg"/>
      <itunes:author>Miriam Vogel</itunes:author>
      <itunes:duration>2430</itunes:duration>
      <itunes:summary>Vilas Dhar is a technologist, lawyer, and human rights advocate championing a new social compact for the digital age. As President and Trustee of the Patrick J. McGovern Foundation, he is a global leader in advancing artificial intelligence and data solutions to create a thriving, equitable, and sustainable future for all. In this episode we ask Vilas how he arrived at the intersection of AI and philanthropy, and how he thinks philanthropists and civil society can shape a more inclusive and societally beneficial future for AI. 

-----

To learn more about EqualAI, visit our website: https://www.equalai.org/
You can also follow us on Twitter: @ai_equal</itunes:summary>
      <itunes:subtitle>Vilas Dhar is a technologist, lawyer, and human rights advocate championing a new social compact for the digital age. As President and Trustee of the Patrick J. McGovern Foundation, he is a global leader in advancing artificial intelligence and data solut</itunes:subtitle>
      <itunes:keywords>technology, AI Governance, AI, Tech, Artificial Intelligence, AI literacy, AI Policy</itunes:keywords>
      <itunes:explicit>No</itunes:explicit>
    </item>
    <item>
      <title>Steve Mills: How can companies walk the walk on responsible AI? </title>
      <itunes:episode>26</itunes:episode>
      <podcast:episode>26</podcast:episode>
      <itunes:title>Steve Mills: How can companies walk the walk on responsible AI? </itunes:title>
      <itunes:episodeType>full</itunes:episodeType>
      <guid isPermaLink="false">84ceb013-41f8-4e71-b09b-faf183e07388</guid>
      <link>https://share.transistor.fm/s/a4563c46</link>
      <description>
        <![CDATA[Steve Mills is a Partner at Boston Consulting Group (BCG), where he serves as Chief AI Ethics Officer and the Global Lead for Artificial Intelligence in the Public Sector. He has worked with dozens of leading companies and government agencies to improve their AI practices, and in this episode he shares some of the key lessons he has learned about how organizations can translate their ethical AI commitments into practical, meaningful actions. 


-----

To learn more about EqualAI, visit our website: https://www.equalai.org/
You can also follow us on Twitter: @ai_equal]]>
      </description>
      <content:encoded>
        <![CDATA[Steve Mills is a Partner at Boston Consulting Group (BCG), where he serves as Chief AI Ethics Officer and the Global Lead for Artificial Intelligence in the Public Sector. He has worked with dozens of leading companies and government agencies to improve their AI practices, and in this episode he shares some of the key lessons he has learned about how organizations can translate their ethical AI commitments into practical, meaningful actions. 


-----

To learn more about EqualAI, visit our website: https://www.equalai.org/
You can also follow us on Twitter: @ai_equal]]>
      </content:encoded>
      <pubDate>Tue, 24 Aug 2021 17:05:25 -0400</pubDate>
      <author>Miriam Vogel</author>
      <enclosure url="https://media.transistor.fm/a4563c46/b5c591a9.mp3" length="34362045" type="audio/mpeg"/>
      <itunes:author>Miriam Vogel</itunes:author>
      <itunes:duration>2145</itunes:duration>
      <itunes:summary>Steve Mills is a Partner at Boston Consulting Group (BCG), where he serves as Chief AI Ethics Officer and the Global Lead for Artificial Intelligence in the Public Sector. He has worked with dozens of leading companies and government agencies to improve their AI practices, and in this episode he shares some of the key lessons he has learned about how organizations can translate their ethical AI commitments into practical, meaningful actions. 


-----

To learn more about EqualAI, visit our website: https://www.equalai.org/
You can also follow us on Twitter: @ai_equal</itunes:summary>
      <itunes:subtitle>Steve Mills is a Partner at Boston Consulting Group (BCG), where he serves as Chief AI Ethics Officer and the Global Lead for Artificial Intelligence in the Public Sector. He has worked with dozens of leading companies and government agencies to improve t</itunes:subtitle>
      <itunes:keywords>technology, AI Governance, AI, Tech, Artificial Intelligence, AI literacy, AI Policy</itunes:keywords>
      <itunes:explicit>No</itunes:explicit>
    </item>
    <item>
      <title>Julia Stoyanovich: Can AI systems operate fairly within complex, diverse societies?</title>
      <itunes:episode>25</itunes:episode>
      <podcast:episode>25</podcast:episode>
      <itunes:title>Julia Stoyanovich: Can AI systems operate fairly within complex, diverse societies?</itunes:title>
      <itunes:episodeType>full</itunes:episodeType>
      <guid isPermaLink="false">14915c01-9388-4a6c-a587-18eceaa8ac7b</guid>
      <link>https://share.transistor.fm/s/4728ad90</link>
      <description>
        <![CDATA[Julia Stoyanovich is an Assistant Professor in the Department of Computer Science and Engineering at NYU’s Tandon School of Engineering, where she is also the Director of the Center for Responsible AI. Her research focuses on responsible data management and analysis and on practical tools for operationalizing fairness, diversity, transparency, and data protection in all stages of data acquisition and processing. In addition to conducting field-leading research and teaching, Professor Stoyanovich has written several comics aimed at communicating complex AI issues to diverse audiences.

-----

To learn more about EqualAI, visit our website: https://www.equalai.org/
You can also follow us on Twitter: @ai_equal]]>
      </description>
      <content:encoded>
        <![CDATA[Julia Stoyanovich is an Assistant Professor in the Department of Computer Science and Engineering at NYU’s Tandon School of Engineering, where she is also the Director of the Center for Responsible AI. Her research focuses on responsible data management and analysis and on practical tools for operationalizing fairness, diversity, transparency, and data protection in all stages of data acquisition and processing. In addition to conducting field-leading research and teaching, Professor Stoyanovich has written several comics aimed at communicating complex AI issues to diverse audiences.

-----

To learn more about EqualAI, visit our website: https://www.equalai.org/
You can also follow us on Twitter: @ai_equal]]>
      </content:encoded>
      <pubDate>Wed, 18 Aug 2021 11:22:18 -0400</pubDate>
      <author>Miriam Vogel</author>
      <enclosure url="https://media.transistor.fm/4728ad90/4f1dcf29.mp3" length="40991556" type="audio/mpeg"/>
      <itunes:author>Miriam Vogel</itunes:author>
      <itunes:duration>2555</itunes:duration>
      <itunes:summary>Julia Stoyanovich is an Assistant Professor in the Department of Computer Science and Engineering at NYU’s Tandon School of Engineering, where she is also the Director of the Center for Responsible AI. Her research focuses on responsible data management and analysis and on practical tools for operationalizing fairness, diversity, transparency, and data protection in all stages of data acquisition and processing. In addition to conducting field-leading research and teaching, Professor Stoyanovich has written several comics aimed at communicating complex AI issues to diverse audiences.

-----

To learn more about EqualAI, visit our website: https://www.equalai.org/
You can also follow us on Twitter: @ai_equal</itunes:summary>
      <itunes:subtitle>Julia Stoyanovich is an Assistant Professor in the Department of Computer Science and Engineering at NYU’s Tandon School of Engineering, where she is also the Director of the Center for Responsible AI. Her research focuses on responsible data management a</itunes:subtitle>
      <itunes:keywords>technology, AI Governance, AI, Tech, Artificial Intelligence, AI literacy, AI Policy</itunes:keywords>
      <itunes:explicit>No</itunes:explicit>
    </item>
    <item>
      <title>Oren Etzioni: Why is the term "machine learning" a misnomer?</title>
      <itunes:episode>24</itunes:episode>
      <podcast:episode>24</podcast:episode>
      <itunes:title>Oren Etzioni: Why is the term "machine learning" a misnomer?</itunes:title>
      <itunes:episodeType>full</itunes:episodeType>
      <guid isPermaLink="false">fcba86a3-3e45-4e71-a648-971cfc556d1f</guid>
      <link>https://share.transistor.fm/s/78475256</link>
      <description>
        <![CDATA[Dr. Oren Etzioni is Chief Executive Officer at AI2, the Allen Institute for AI, a non-profit that offers foundational research, applied research and user-facing products. He is Professor Emeritus at University of Washington and a Venture Partner at the Madrona Venture Group. He has won numerous awards and founded several companies, has written over 100 technical papers, and provides commentary on AI for The New York Times, Wired, and Nature. In this episode, Oren explains why “machine learning” is a misnomer and some of the exciting AI innovations he is supporting that will result in greater inclusivity.

-----

To learn more about EqualAI, visit our website: https://www.equalai.org/
You can also follow us on Twitter: @ai_equal]]>
      </description>
      <content:encoded>
        <![CDATA[Dr. Oren Etzioni is Chief Executive Officer at AI2, the Allen Institute for AI, a non-profit that offers foundational research, applied research and user-facing products. He is Professor Emeritus at University of Washington and a Venture Partner at the Madrona Venture Group. He has won numerous awards and founded several companies, has written over 100 technical papers, and provides commentary on AI for The New York Times, Wired, and Nature. In this episode, Oren explains why “machine learning” is a misnomer and some of the exciting AI innovations he is supporting that will result in greater inclusivity.

-----

To learn more about EqualAI, visit our website: https://www.equalai.org/
You can also follow us on Twitter: @ai_equal]]>
      </content:encoded>
      <pubDate>Tue, 10 Aug 2021 12:00:00 -0400</pubDate>
      <author>Miriam Vogel</author>
      <enclosure url="https://media.transistor.fm/78475256/6bf7146a.mp3" length="35666944" type="audio/mpeg"/>
      <itunes:author>Miriam Vogel</itunes:author>
      <itunes:duration>2222</itunes:duration>
      <itunes:summary>Dr. Oren Etzioni is Chief Executive Officer at AI2, the Allen Institute for AI, a non-profit that offers foundational research, applied research and user-facing products. He is Professor Emeritus at University of Washington and a Venture Partner at the Madrona Venture Group. He has won numerous awards and founded several companies, has written over 100 technical papers, and provides commentary on AI for The New York Times, Wired, and Nature. In this episode, Oren explains why “machine learning” is a misnomer and some of the exciting AI innovations he is supporting that will result in greater inclusivity.

-----

To learn more about EqualAI, visit our website: https://www.equalai.org/
You can also follow us on Twitter: @ai_equal</itunes:summary>
      <itunes:subtitle>Dr. Oren Etzioni is Chief Executive Officer at AI2, the Allen Institute for AI, a non-profit that offers foundational research, applied research and user-facing products. He is Professor Emeritus at University of Washington and a Venture Partner at the Ma</itunes:subtitle>
      <itunes:keywords>technology, AI Governance, AI, Tech, Artificial Intelligence, AI literacy, AI Policy</itunes:keywords>
      <itunes:explicit>No</itunes:explicit>
    </item>
    <item>
      <title>Alexandra Givens: What makes tech the social justice issue of our time?</title>
      <itunes:episode>23</itunes:episode>
      <podcast:episode>23</podcast:episode>
      <itunes:title>Alexandra Givens: What makes tech the social justice issue of our time?</itunes:title>
      <itunes:episodeType>full</itunes:episodeType>
      <guid isPermaLink="false">391cac36-0209-4239-bdc6-32901248a6fc</guid>
      <link>https://share.transistor.fm/s/6b0558f2</link>
      <description>
        <![CDATA[Alexandra Reeve Givens is the President &amp; CEO of the Center for Democracy and Technology (CDT). She is an advocate for using technology to increase equality, amplify voices, and promote human rights. Previously, Alexandra served as the founding Executive Director of the Institute for Technology Law &amp; Policy at Georgetown Law, served as Chief Counsel for IP and Antitrust on the Senate Judiciary Committee and began her career as a litigator at Cravath, Swaine &amp; Moore. In this episode, Alexandra explains her unconventional path to the tech space as a lawyer and why she believes technology is the social justice issue of our time.

-----

To learn more about EqualAI, visit our website: https://www.equalai.org/
You can also follow us on Twitter: @ai_equal]]>
      </description>
      <content:encoded>
        <![CDATA[Alexandra Reeve Givens is the President &amp; CEO of the Center for Democracy and Technology (CDT). She is an advocate for using technology to increase equality, amplify voices, and promote human rights. Previously, Alexandra served as the founding Executive Director of the Institute for Technology Law &amp; Policy at Georgetown Law, served as Chief Counsel for IP and Antitrust on the Senate Judiciary Committee and began her career as a litigator at Cravath, Swaine &amp; Moore. In this episode, Alexandra explains her unconventional path to the tech space as a lawyer and why she believes technology is the social justice issue of our time.

-----

To learn more about EqualAI, visit our website: https://www.equalai.org/
You can also follow us on Twitter: @ai_equal]]>
      </content:encoded>
      <pubDate>Thu, 05 Aug 2021 10:25:45 -0400</pubDate>
      <author>Miriam Vogel</author>
      <enclosure url="https://media.transistor.fm/6b0558f2/42899687.mp3" length="25272217" type="audio/mpeg"/>
      <itunes:author>Miriam Vogel</itunes:author>
      <itunes:duration>1573</itunes:duration>
      <itunes:summary>Alexandra Reeve Givens is the President &amp; CEO of the Center for Democracy and Technology (CDT). She is an advocate for using technology to increase equality, amplify voices, and promote human rights. Previously, Alexandra served as the founding Executive Director of the Institute for Technology Law &amp; Policy at Georgetown Law, served as Chief Counsel for IP and Antitrust on the Senate Judiciary Committee and began her career as a litigator at Cravath, Swaine &amp; Moore. In this episode, Alexandra explains her unconventional path to the tech space as a lawyer and why she believes technology is the social justice issue of our time.

-----

To learn more about EqualAI, visit our website: https://www.equalai.org/
You can also follow us on Twitter: @ai_equal</itunes:summary>
      <itunes:subtitle>Alexandra Reeve Givens is the President &amp; CEO of the Center for Democracy and Technology (CDT). She is an advocate for using technology to increase equality, amplify voices, and promote human rights. Previously, Alexandra served as the founding Execut</itunes:subtitle>
      <itunes:keywords>technology, AI Governance, AI, Tech, Artificial Intelligence, AI literacy, AI Policy</itunes:keywords>
      <itunes:explicit>No</itunes:explicit>
    </item>
    <item>
      <title>Navrina Singh: How is AI a multi-stakeholder problem and how do we solve for it? (Spoiler: it's all about trust.)</title>
      <itunes:episode>22</itunes:episode>
      <podcast:episode>22</podcast:episode>
      <itunes:title>Navrina Singh: How is AI a multi-stakeholder problem and how do we solve for it? (Spoiler: it's all about trust.)</itunes:title>
      <itunes:episodeType>full</itunes:episodeType>
      <guid isPermaLink="false">4a080d46-c022-4918-aa54-7a2b1d491569</guid>
      <link>https://share.transistor.fm/s/8cba5a73</link>
      <description>
        <![CDATA[Navrina Singh is the Founder &amp; CEO of Credo AI, whose mission is to empower organizations to deliver trustworthy and responsible AI through AI audit and governance products. Navrina serves on the Board of Directors of Mozilla and Stella Labs. Previously she served as the Product leader focused on AI at Microsoft where she was responsible for building and commercializing Enterprise Virtual Agents and spent 12+ years at Qualcomm. In this episode, Navrina shares several insights into responsible AI, including the 3 key elements to building trust in AI and the 4 components of the "Ethical AI flywheel."

-----

To learn more about EqualAI, visit our website: https://www.equalai.org/
You can also follow us on Twitter: @ai_equal]]>
      </description>
      <content:encoded>
        <![CDATA[Navrina Singh is the Founder &amp; CEO of Credo AI, whose mission is to empower organizations to deliver trustworthy and responsible AI through AI audit and governance products. Navrina serves on the Board of Directors of Mozilla and Stella Labs. Previously she served as the Product leader focused on AI at Microsoft where she was responsible for building and commercializing Enterprise Virtual Agents and spent 12+ years at Qualcomm. In this episode, Navrina shares several insights into responsible AI, including the 3 key elements to building trust in AI and the 4 components of the "Ethical AI flywheel."

-----

To learn more about EqualAI, visit our website: https://www.equalai.org/
You can also follow us on Twitter: @ai_equal]]>
      </content:encoded>
      <pubDate>Wed, 28 Jul 2021 16:52:55 -0400</pubDate>
      <author>Miriam Vogel</author>
      <enclosure url="https://media.transistor.fm/8cba5a73/33697f02.mp3" length="36579659" type="audio/mpeg"/>
      <itunes:author>Miriam Vogel</itunes:author>
      <itunes:duration>2279</itunes:duration>
      <itunes:summary>Navrina Singh is the Founder &amp; CEO of Credo AI, whose mission is to empower organizations to deliver trustworthy and responsible AI through AI audit and governance products. Navrina serves on the Board of Directors of Mozilla and Stella Labs. Previously she served as the Product leader focused on AI at Microsoft where she was responsible for building and commercializing Enterprise Virtual Agents and spent 12+ years at Qualcomm. In this episode, Navrina shares several insights into responsible AI, including the 3 key elements to building trust in AI and the 4 components of the "Ethical AI flywheel."

-----

To learn more about EqualAI, visit our website: https://www.equalai.org/
You can also follow us on Twitter: @ai_equal</itunes:summary>
      <itunes:subtitle>Navrina Singh is the Founder &amp; CEO of Credo AI, whose mission is to empower organizations to deliver trustworthy and responsible AI through AI audit and governance products. Navrina serves on the Board of Directors of Mozilla and Stella Labs. Previous</itunes:subtitle>
      <itunes:keywords>technology, AI Governance, AI, Tech, Artificial Intelligence, AI literacy, AI Policy</itunes:keywords>
      <itunes:explicit>No</itunes:explicit>
    </item>
    <item>
      <title>Andrew Burt: How can lawyers be partners in the AI space?</title>
      <itunes:episode>21</itunes:episode>
      <podcast:episode>21</podcast:episode>
      <itunes:title>Andrew Burt: How can lawyers be partners in the AI space?</itunes:title>
      <itunes:episodeType>full</itunes:episodeType>
      <guid isPermaLink="false">449a4160-c8ae-4c51-9547-66a6ac9eca64</guid>
      <link>https://share.transistor.fm/s/3418d448</link>
      <description>
        <![CDATA[Andrew Burt is a lawyer specializing in artificial intelligence, information security and data privacy. He co-founded bnh.ai and serves as chief legal officer of Immuta. His work has been profiled by magazines like Fast Company and his writing has appeared in Harvard Business Review, the New York Times and the Financial Times. In this episode, we explore the 'hype cycle' of AI where risks are overlooked and the appropriate role of a lawyer as a partner in this space.

-----

To learn more about EqualAI, visit our website: https://www.equalai.org/
You can also follow us on Twitter: @ai_equal]]>
      </description>
      <content:encoded>
        <![CDATA[Andrew Burt is a lawyer specializing in artificial intelligence, information security and data privacy. He co-founded bnh.ai and serves as chief legal officer of Immuta. His work has been profiled by magazines like Fast Company and his writing has appeared in Harvard Business Review, the New York Times and the Financial Times. In this episode, we explore the 'hype cycle' of AI where risks are overlooked and the appropriate role of a lawyer as a partner in this space.

-----

To learn more about EqualAI, visit our website: https://www.equalai.org/
You can also follow us on Twitter: @ai_equal]]>
      </content:encoded>
      <pubDate>Thu, 22 Jul 2021 10:13:41 -0400</pubDate>
      <author>Miriam Vogel</author>
      <enclosure url="https://media.transistor.fm/3418d448/628c136f.mp3" length="37413355" type="audio/mpeg"/>
      <itunes:author>Miriam Vogel</itunes:author>
      <itunes:duration>2331</itunes:duration>
      <itunes:summary>Andrew Burt is a lawyer specializing in artificial intelligence, information security and data privacy. He co-founded bnh.ai and serves as chief legal officer of Immuta. His work has been profiled by magazines like Fast Company and his writing has appeared in Harvard Business Review, the New York Times and the Financial Times. In this episode, we explore the 'hype cycle' of AI where risks are overlooked and the appropriate role of a lawyer as a partner in this space.

-----

To learn more about EqualAI, visit our website: https://www.equalai.org/
You can also follow us on Twitter: @ai_equal</itunes:summary>
      <itunes:subtitle>Andrew Burt is a lawyer specializing in artificial intelligence, information security and data privacy. He co-founded bnh.ai and serves as chief legal officer of Immuta. His work has been profiled by magazines like Fast Company and his writing has appeare</itunes:subtitle>
      <itunes:keywords>technology, AI Governance, AI, Tech, Artificial Intelligence, AI literacy, AI Policy</itunes:keywords>
      <itunes:explicit>No</itunes:explicit>
    </item>
    <item>
      <title>Anima Anandkumar: How can the intersection of academia and industry inform the next generation of AI?</title>
      <itunes:episode>20</itunes:episode>
      <podcast:episode>20</podcast:episode>
      <itunes:title>Anima Anandkumar: How can the intersection of academia and industry inform the next generation of AI?</itunes:title>
      <itunes:episodeType>full</itunes:episodeType>
      <guid isPermaLink="false">b63cb754-4fd1-488e-89b1-1ff5500fc146</guid>
      <link>https://share.transistor.fm/s/13328f68</link>
      <description>
        <![CDATA[Anima Anandkumar is an accomplished AI researcher in both academia and in industry. She is the Bren professor at Caltech CMS department and director of machine learning research at NVIDIA. Previously, Anima was a principal scientist at Amazon Web Service, where she enabled machine learning on the cloud infrastructure. Anima is the recipient of numerous awards and honors and has been featured in documentaries and articles by PBS, Wired, MIT Technology review, Forbes and many others. In this episode we learn about the “trinity of the deep learning revolution,” how the next generation of AI will bring the “mind &amp; body” together, and the detrimental impacts fostered by a lack of diversity in tech.

-----

To learn more about EqualAI, visit our website: https://www.equalai.org/
You can also follow us on Twitter: @ai_equal]]>
      </description>
      <content:encoded>
        <![CDATA[Anima Anandkumar is an accomplished AI researcher in both academia and in industry. She is the Bren professor at Caltech CMS department and director of machine learning research at NVIDIA. Previously, Anima was a principal scientist at Amazon Web Service, where she enabled machine learning on the cloud infrastructure. Anima is the recipient of numerous awards and honors and has been featured in documentaries and articles by PBS, Wired, MIT Technology review, Forbes and many others. In this episode we learn about the “trinity of the deep learning revolution,” how the next generation of AI will bring the “mind &amp; body” together, and the detrimental impacts fostered by a lack of diversity in tech.

-----

To learn more about EqualAI, visit our website: https://www.equalai.org/
You can also follow us on Twitter: @ai_equal]]>
      </content:encoded>
      <pubDate>Tue, 29 Jun 2021 20:23:54 -0400</pubDate>
      <author>Miriam Vogel</author>
      <enclosure url="https://media.transistor.fm/13328f68/19188530.mp3" length="38128055" type="audio/mpeg"/>
      <itunes:author>Miriam Vogel</itunes:author>
      <itunes:duration>2376</itunes:duration>
      <itunes:summary>Anima Anandkumar is an accomplished AI researcher in both academia and in industry. She is the Bren professor at Caltech CMS department and director of machine learning research at NVIDIA. Previously, Anima was a principal scientist at Amazon Web Service, where she enabled machine learning on the cloud infrastructure. Anima is the recipient of numerous awards and honors and has been featured in documentaries and articles by PBS, Wired, MIT Technology review, Forbes and many others. In this episode we learn about the “trinity of the deep learning revolution,” how the next generation of AI will bring the “mind &amp; body” together, and the detrimental impacts fostered by a lack of diversity in tech.

-----

To learn more about EqualAI, visit our website: https://www.equalai.org/
You can also follow us on Twitter: @ai_equal</itunes:summary>
      <itunes:subtitle>Anima Anandkumar is an accomplished AI researcher in both academia and in industry. She is the Bren professor at Caltech CMS department and director of machine learning research at NVIDIA. Previously, Anima was a principal scientist at Amazon Web Service,</itunes:subtitle>
      <itunes:keywords>technology, AI Governance, AI, Tech, Artificial Intelligence, AI literacy, AI Policy</itunes:keywords>
      <itunes:explicit>No</itunes:explicit>
    </item>
    <item>
      <title>Vivienne Ming: How can we create AI that lifts society up rather than tearing it down? </title>
      <itunes:episode>19</itunes:episode>
      <podcast:episode>19</podcast:episode>
      <itunes:title>Vivienne Ming: How can we create AI that lifts society up rather than tearing it down? </itunes:title>
      <itunes:episodeType>full</itunes:episodeType>
      <guid isPermaLink="false">3424ac2d-2128-4847-85ee-68f1309d294f</guid>
      <link>https://share.transistor.fm/s/82e65748</link>
      <description>
        <![CDATA[Vivienne Ming is an internationally recognized neuroscientist and AI expert who has pushed the boundaries of AI in diverse areas including education, human resources, disability, and physical and mental health. In this episode, we ask Vivienne how we can ensure that society captures the benefits of AI technologies while mitigating their risks and avoiding harms to vulnerable populations.

-----

To learn more about EqualAI, visit our website: https://www.equalai.org/
You can also follow us on Twitter: @ai_equal]]>
      </description>
      <content:encoded>
        <![CDATA[Vivienne Ming is an internationally recognized neuroscientist and AI expert who has pushed the boundaries of AI in diverse areas including education, human resources, disability, and physical and mental health. In this episode, we ask Vivienne how we can ensure that society captures the benefits of AI technologies while mitigating their risks and avoiding harms to vulnerable populations.

-----

To learn more about EqualAI, visit our website: https://www.equalai.org/
You can also follow us on Twitter: @ai_equal]]>
      </content:encoded>
      <pubDate>Thu, 24 Jun 2021 09:58:03 -0400</pubDate>
      <author>Miriam Vogel</author>
      <enclosure url="https://media.transistor.fm/82e65748/0b031b5c.mp3" length="55817261" type="audio/mpeg"/>
      <itunes:author>Miriam Vogel</itunes:author>
      <itunes:duration>3482</itunes:duration>
      <itunes:summary>Vivienne Ming is an internationally recognized neuroscientist and AI expert who has pushed the boundaries of AI in diverse areas including education, human resources, disability, and physical and mental health. In this episode, we ask Vivienne how we can ensure that society captures the benefits of AI technologies while mitigating their risks and avoiding harms to vulnerable populations.

-----

To learn more about EqualAI, visit our website: https://www.equalai.org/
You can also follow us on Twitter: @ai_equal</itunes:summary>
      <itunes:subtitle>Vivienne Ming is an internationally recognized neuroscientist and AI expert who has pushed the boundaries of AI in diverse areas including education, human resources, disability, and physical and mental health. In this episode, we ask Vivienne how we can </itunes:subtitle>
      <itunes:keywords>technology, AI Governance, AI, Tech, Artificial Intelligence, AI literacy, AI Policy</itunes:keywords>
      <itunes:explicit>No</itunes:explicit>
    </item>
    <item>
      <title>Heather Cox: Why did our company make a public commitment to equitable AI?</title>
      <itunes:episode>18</itunes:episode>
      <podcast:episode>18</podcast:episode>
      <itunes:title>Heather Cox: Why did our company make a public commitment to equitable AI?</itunes:title>
      <itunes:episodeType>full</itunes:episodeType>
      <guid isPermaLink="false">90cb34d3-b00d-4968-99fb-c9f0ac8f9afc</guid>
      <link>https://share.transistor.fm/s/95be7b04</link>
      <description>
        <![CDATA[On this episode, we hear from Heather Cox, Chief Digital Health and Analytics Officer at Humana. Heather brings 25 years of experience to the role including having served as Chief Technology and Digital Officer at USAA, and CEO of Citi FinTech at Citigroup. In this episode, Heather shares why she decided Humana should take the EqualAI Pledge to Reduce Bias in AI and how they have restructured their company and partnerships to ensure their AI programs better serve their population, and adhere to the core principle to "do no harm".

-----

To learn more about EqualAI, visit our website: https://www.equalai.org/
You can also follow us on Twitter: @ai_equal]]>
      </description>
      <content:encoded>
        <![CDATA[On this episode, we hear from Heather Cox, Chief Digital Health and Analytics Officer at Humana. Heather brings 25 years of experience to the role including having served as Chief Technology and Digital Officer at USAA, and CEO of Citi FinTech at Citigroup. In this episode, Heather shares why she decided Humana should take the EqualAI Pledge to Reduce Bias in AI and how they have restructured their company and partnerships to ensure their AI programs better serve their population, and adhere to the core principle to "do no harm".

-----

To learn more about EqualAI, visit our website: https://www.equalai.org/
You can also follow us on Twitter: @ai_equal]]>
      </content:encoded>
      <pubDate>Tue, 15 Jun 2021 17:37:13 -0400</pubDate>
      <author>Miriam Vogel</author>
      <enclosure url="https://media.transistor.fm/95be7b04/1c5e4d69.mp3" length="38759934" type="audio/mpeg"/>
      <itunes:author>Miriam Vogel</itunes:author>
      <itunes:duration>2413</itunes:duration>
      <itunes:summary>On this episode, we hear from Heather Cox, Chief Digital Health and Analytics Officer at Humana. Heather brings 25 years of experience to the role including having served as Chief Technology and Digital Officer at USAA, and CEO of Citi FinTech at Citigroup. In this episode, Heather shares why she decided Humana should take the EqualAI Pledge to Reduce Bias in AI and how they have restructured their company and partnerships to ensure their AI programs better serve their population, and adhere to the core principle to "do no harm".

-----

To learn more about EqualAI, visit our website: https://www.equalai.org/
You can also follow us on Twitter: @ai_equal</itunes:summary>
      <itunes:subtitle>On this episode, we hear from Heather Cox, Chief Digital Health and Analytics Officer at Humana. Heather brings 25 years of experience to the role including having served as Chief Technology and Digital Officer at USAA, and CEO of Citi FinTech at Citigrou</itunes:subtitle>
      <itunes:keywords>technology, AI Governance, AI, Tech, Artificial Intelligence, AI literacy, AI Policy</itunes:keywords>
      <itunes:explicit>No</itunes:explicit>
    </item>
    <item>
      <title>Commissioner Edward Santow: How can tech governance preserve human rights and achieve responsible AI?</title>
      <itunes:episode>17</itunes:episode>
      <podcast:episode>17</podcast:episode>
      <itunes:title>Commissioner Edward Santow: How can tech governance preserve human rights and achieve responsible AI?</itunes:title>
      <itunes:episodeType>full</itunes:episodeType>
      <guid isPermaLink="false">143a9805-8aa1-4912-b37c-776f17c34e7e</guid>
      <link>https://share.transistor.fm/s/e5034733</link>
      <description>
        <![CDATA[On this episode, we are thrilled to share our conversation with Commissioner Edward Santow of the Australian Human Rights Commission. The Commission recently released the Human Rights and Technology final report, which makes 38 recommendations to ensure human rights are upheld in Australia’s laws, policies, funding and education on AI. We ask him about lessons learned in the 3 year creation of this report and which recommendations are most universally applicable. Learn more about the report here: https://tech.humanrights.gov.au/

-----

To learn more about EqualAI, visit our website: https://www.equalai.org/
You can also follow us on Twitter: @ai_equal]]>
      </description>
      <content:encoded>
        <![CDATA[On this episode, we are thrilled to share our conversation with Commissioner Edward Santow of the Australian Human Rights Commission. The Commission recently released the Human Rights and Technology final report, which makes 38 recommendations to ensure human rights are upheld in Australia’s laws, policies, funding and education on AI. We ask him about lessons learned in the 3 year creation of this report and which recommendations are most universally applicable. Learn more about the report here: https://tech.humanrights.gov.au/

-----

To learn more about EqualAI, visit our website: https://www.equalai.org/
You can also follow us on Twitter: @ai_equal]]>
      </content:encoded>
      <pubDate>Tue, 08 Jun 2021 14:46:24 -0400</pubDate>
      <author>Miriam Vogel</author>
      <enclosure url="https://media.transistor.fm/e5034733/3465e1fc.mp3" length="39370764" type="audio/mpeg"/>
      <itunes:author>Miriam Vogel</itunes:author>
      <itunes:duration>2454</itunes:duration>
      <itunes:summary>On this episode, we are thrilled to share our conversation with Commissioner Edward Santow of the Australian Human Rights Commission. The Commission recently released the Human Rights and Technology final report, which makes 38 recommendations to ensure human rights are upheld in Australia’s laws, policies, funding and education on AI. We ask him about lessons learned in the 3 year creation of this report and which recommendations are most universally applicable. Learn more about the report here: https://tech.humanrights.gov.au/

-----

To learn more about EqualAI, visit our website: https://www.equalai.org/
You can also follow us on Twitter: @ai_equal</itunes:summary>
      <itunes:subtitle>On this episode, we are thrilled to share our conversation with Commissioner Edward Santow of the Australian Human Rights Commission. The Commission recently released the Human Rights and Technology final report, which makes 38 recommendations to ensure h</itunes:subtitle>
      <itunes:keywords>technology, AI Governance, AI, Tech, Artificial Intelligence, AI literacy, AI Policy</itunes:keywords>
      <itunes:explicit>No</itunes:explicit>
    </item>
    <item>
      <title>Tess Posner: How can we create a more inclusive technology industry?  </title>
      <itunes:episode>16</itunes:episode>
      <podcast:episode>16</podcast:episode>
      <itunes:title>Tess Posner: How can we create a more inclusive technology industry?  </itunes:title>
      <itunes:episodeType>full</itunes:episodeType>
      <guid isPermaLink="false">1baf281c-68cb-423a-b25c-4ae3969a28b1</guid>
      <link>https://share.transistor.fm/s/9e43ca7e</link>
      <description>
        <![CDATA[Tess Posner is the CEO of AI4ALL, an organization working to make the technology industry more inclusive and to ensure that AI is developed responsibly. Before joining AI4ALL, she was Managing Director of TechHire at Opportunity@Work, a national initiative launched out of the White House to increase diversity in the tech economy. In this episode, we explore the diversity challenges facing the technology industry and the exciting efforts that AI4ALL is leading to empower diverse young people to join – and improve – one of the most powerful industries shaping society today.  

-----

To learn more about EqualAI, visit our website: https://www.equalai.org/
You can also follow us on Twitter: @ai_equal]]>
      </description>
      <content:encoded>
        <![CDATA[Tess Posner is the CEO of AI4ALL, an organization working to make the technology industry more inclusive and to ensure that AI is developed responsibly. Before joining AI4ALL, she was Managing Director of TechHire at Opportunity@Work, a national initiative launched out of the White House to increase diversity in the tech economy. In this episode, we explore the diversity challenges facing the technology industry and the exciting efforts that AI4ALL is leading to empower diverse young people to join – and improve – one of the most powerful industries shaping society today.  

-----

To learn more about EqualAI, visit our website: https://www.equalai.org/
You can also follow us on Twitter: @ai_equal]]>
      </content:encoded>
      <pubDate>Tue, 01 Jun 2021 17:35:33 -0400</pubDate>
      <author>Miriam Vogel</author>
      <enclosure url="https://media.transistor.fm/9e43ca7e/a54b3ad8.mp3" length="31184648" type="audio/mpeg"/>
      <itunes:author>Miriam Vogel</itunes:author>
      <itunes:duration>1942</itunes:duration>
      <itunes:summary>Tess Posner is the CEO of AI4ALL, an organization working to make the technology industry more inclusive and to ensure that AI is developed responsibly. Before joining AI4ALL, she was Managing Director of TechHire at Opportunity@Work, a national initiative launched out of the White House to increase diversity in the tech economy. In this episode, we explore the diversity challenges facing the technology industry and the exciting efforts that AI4ALL is leading to empower diverse young people to join – and improve – one of the most powerful industries shaping society today.  

-----

To learn more about EqualAI, visit our website: https://www.equalai.org/
You can also follow us on Twitter: @ai_equal</itunes:summary>
      <itunes:subtitle>Tess Posner is the CEO of AI4ALL, an organization working to make the technology industry more inclusive and to ensure that AI is developed responsibly. Before joining AI4ALL, she was Managing Director of TechHire at Opportunity@Work, a national initiativ</itunes:subtitle>
      <itunes:keywords>technology, AI Governance, AI, Tech, Artificial Intelligence, AI literacy, AI Policy</itunes:keywords>
      <itunes:explicit>No</itunes:explicit>
    </item>
    <item>
      <title>Sarah Drinkwater: What is 'Responsible AI' and why don't we have it?</title>
      <itunes:episode>15</itunes:episode>
      <podcast:episode>15</podcast:episode>
      <itunes:title>Sarah Drinkwater: What is 'Responsible AI' and why don't we have it?</itunes:title>
      <itunes:episodeType>full</itunes:episodeType>
      <guid isPermaLink="false">f4d1d3f7-4a4d-4c40-aa74-85ba87688b78</guid>
      <link>https://share.transistor.fm/s/67e3b592</link>
      <description>
        <![CDATA[Sarah Drinkwater is director of the Responsible Technology team at the Omidyar Network where she works to help technologists prevent, mitigate, and correct societal downsides of technology—and maximize positive impact. Prior to Omidyar Network, Sarah was head of Campus London, Google’s first physical startup hub. At Google, Sarah also built and led a global Google Maps community team. She also advised startups and large brands on their social strategy and was a journalist. On this episode we ask, "What is 'Responsible AI' and why don't we have it?"

-----

To learn more about EqualAI, visit our website: https://www.equalai.org/
You can also follow us on Twitter: @ai_equal]]>
      </description>
      <content:encoded>
        <![CDATA[Sarah Drinkwater is director of the Responsible Technology team at the Omidyar Network where she works to help technologists prevent, mitigate, and correct societal downsides of technology—and maximize positive impact. Prior to Omidyar Network, Sarah was head of Campus London, Google’s first physical startup hub. At Google, Sarah also built and led a global Google Maps community team. She also advised startups and large brands on their social strategy and was a journalist. On this episode we ask, "What is 'Responsible AI' and why don't we have it?"

-----

To learn more about EqualAI, visit our website: https://www.equalai.org/
You can also follow us on Twitter: @ai_equal]]>
      </content:encoded>
      <pubDate>Tue, 25 May 2021 10:37:39 -0400</pubDate>
      <author>Miriam Vogel</author>
      <enclosure url="https://media.transistor.fm/67e3b592/b855b78d.mp3" length="29265377" type="audio/mpeg"/>
      <itunes:author>Miriam Vogel</itunes:author>
      <itunes:duration>1816</itunes:duration>
      <itunes:summary>Sarah Drinkwater is director of the Responsible Technology team at the Omidyar Network where she works to help technologists prevent, mitigate, and correct societal downsides of technology—and maximize positive impact. Prior to Omidyar Network, Sarah was head of Campus London, Google’s first physical startup hub. At Google, Sarah also built and led a global Google Maps community team. She also advised startups and large brands on their social strategy and was a journalist. On this episode we ask, "What is 'Responsible AI' and why don't we have it?"

-----

To learn more about EqualAI, visit our website: https://www.equalai.org/
You can also follow us on Twitter: @ai_equal</itunes:summary>
      <itunes:subtitle>Sarah Drinkwater is director of the Responsible Technology team at the Omidyar Network where she works to help technologists prevent, mitigate, and correct societal downsides of technology—and maximize positive impact. Priorto Omidyar Network, Sarah was h</itunes:subtitle>
      <itunes:keywords>technology, AI Governance, AI, Tech, Artificial Intelligence, AI literacy, AI Policy</itunes:keywords>
      <itunes:explicit>No</itunes:explicit>
    </item>
    <item>
      <title>Tim O'Brien: Is there a role for me in building ethical AI?</title>
      <itunes:episode>14</itunes:episode>
      <podcast:episode>14</podcast:episode>
      <itunes:title>Tim O'Brien: Is there a role for me in building ethical AI?</itunes:title>
      <itunes:episodeType>full</itunes:episodeType>
      <guid isPermaLink="false">8627850a-42a5-404f-bd3f-e5eb5b954525</guid>
      <link>https://share.transistor.fm/s/c03f1c96</link>
      <description>
        <![CDATA[In this episode we speak with Tim O'Brien who leads Ethical AI Advocacy at Microsoft. Before joining Microsoft in 2003, Tim worked as an engineer, a marketer and a consultant at startups and Fortune 500 companies. In this discussion, Tim leads us through Microsoft's journey – and his own – to become a leader in the field of AI ethics and answers the questions: what does an AI Ethicist do? And, is there a role for 'white guys' to play in this field?

-----

To learn more about EqualAI, visit our website: https://www.equalai.org/
You can also follow us on Twitter: @ai_equal]]>
      </description>
      <content:encoded>
        <![CDATA[In this episode we speak with Tim O'Brien who leads Ethical AI Advocacy at Microsoft. Before joining Microsoft in 2003, Tim worked as an engineer, a marketer and a consultant at startups and Fortune 500 companies. In this discussion, Tim leads us through Microsoft's journey – and his own – to become a leader in the field of AI ethics and answers the questions: what does an AI Ethicist do? And, is there a role for 'white guys' to play in this field?

-----

To learn more about EqualAI, visit our website: https://www.equalai.org/
You can also follow us on Twitter: @ai_equal]]>
      </content:encoded>
      <pubDate>Tue, 18 May 2021 12:35:41 -0400</pubDate>
      <author>Miriam Vogel</author>
      <enclosure url="https://media.transistor.fm/c03f1c96/52d740d0.mp3" length="37577491" type="audio/mpeg"/>
      <itunes:author>Miriam Vogel</itunes:author>
      <itunes:duration>2341</itunes:duration>
      <itunes:summary>In this episode we speak with Tim O'Brien who leads Ethical AI Advocacy at Microsoft. Before joining Microsoft in 2003, Tim worked as an engineer, a marketer and a consultant at startups and Fortune 500 companies. In this discussion, Tim leads us through Microsoft's journey – and his own – to become a leader in the field of AI ethics and answers the questions: what does an AI Ethicist do? And, is there a role for 'white guys' to play in this field?

-----

To learn more about EqualAI, visit our website: https://www.equalai.org/
You can also follow us on Twitter: @ai_equal</itunes:summary>
      <itunes:subtitle>In this episode we speak with Tim O'Brien who leads Ethical AI Advocacy at Microsoft. Before joining Microsoft in 2003, Tim worked as an engineer, a marketer and a consultant at startups and Fortune 500 companies. In this discussion, Tim leads us through </itunes:subtitle>
      <itunes:keywords>technology, AI Governance, AI, Tech, Artificial Intelligence, AI literacy, AI Policy</itunes:keywords>
      <itunes:explicit>No</itunes:explicit>
    </item>
    <item>
      <title>Aneesh Chopra: How would you grade the US government's tech readiness?</title>
      <itunes:episode>13</itunes:episode>
      <podcast:episode>13</podcast:episode>
      <itunes:title>Aneesh Chopra: How would you grade the US government's tech readiness?</itunes:title>
      <itunes:episodeType>full</itunes:episodeType>
      <guid isPermaLink="false">1a52293f-844c-415e-b756-dc0144b3e802</guid>
      <link>https://share.transistor.fm/s/8a7a2697</link>
      <description>
        <![CDATA[Aneesh Chopra served as the first Chief Technology Officer of the United States. He is currently the president of CareJourney, a provider of clinically-relevant analytics that builds a rating system of healthcare networks. He is also the co-founder of a data analytics
investment group, Hunch Analytics. Aneesh sits on the Board of the Health Care Cost Institute, a non-profit focused on unbiased health care utilization and cost information. Previously, Aneesh served as Virginia’s Secretary of Technology and wrote of his experience
in government and tech in his book "Innovative State: How New Technologies Can Transform Government." 

In this episode, Aneesh rates our current state of tech in the US and shares other insights rooted in his experience in state and federal government. (Spoiler: he is no grade inflationist!)

----


To learn more about EqualAI, visit our website: https://www.equalai.org/
You can also follow us on Twitter: @ai_equal]]>
      </description>
      <content:encoded>
        <![CDATA[Aneesh Chopra served as the first Chief Technology Officer of the United States. He is currently the president of CareJourney, a provider of clinically-relevant analytics that builds a rating system of healthcare networks. He is also the co-founder of a data analytics
investment group, Hunch Analytics. Aneesh sits on the Board of the Health Care Cost Institute, a non-profit focused on unbiased health care utilization and cost information. Previously, Aneesh served as Virginia’s Secretary of Technology and wrote of his experience
in government and tech in his book "Innovative State: How New Technologies Can Transform Government." 

In this episode, Aneesh rates our current state of tech in the US and shares other insights rooted in his experience in state and federal government. (Spoiler: he is no grade inflationist!)

----


To learn more about EqualAI, visit our website: https://www.equalai.org/
You can also follow us on Twitter: @ai_equal]]>
      </content:encoded>
      <pubDate>Tue, 11 May 2021 15:36:35 -0400</pubDate>
      <author>Miriam Vogel</author>
      <enclosure url="https://media.transistor.fm/8a7a2697/4085be17.mp3" length="31811509" type="audio/mpeg"/>
      <itunes:author>Miriam Vogel</itunes:author>
      <itunes:duration>1979</itunes:duration>
      <itunes:summary>Aneesh Chopra served as the first Chief Technology Officer of the United States. He is currently the president of CareJourney, a provider of clinically-relevant analytics that builds a rating system of healthcare networks. He is also the co-founder of a data analytics
investment group, Hunch Analytics. Aneesh sits on the Board of the Health Care Cost Institute, a non-profit focused on unbiased health care utilization and cost information. Previously, Aneesh served as Virginia’s Secretary of Technology and wrote of his experience
in government and tech in his book "Innovative State: How New Technologies Can Transform Government." 

In this episode, Aneesh rates our current state of tech in the US and shares other insights rooted in his experience in state and federal government. (Spoiler: he is no grade inflationist!)

----


To learn more about EqualAI, visit our website: https://www.equalai.org/
You can also follow us on Twitter: @ai_equal</itunes:summary>
      <itunes:subtitle>Aneesh Chopra served as the first Chief Technology Officer of the United States. He is currently the president of CareJourney, a provider of clinically-relevant analytics that builds a rating system of healthcare networks. He is also the co-founder of a d</itunes:subtitle>
      <itunes:keywords>technology, AI Governance, AI, Tech, Artificial Intelligence, AI literacy, AI Policy</itunes:keywords>
      <itunes:explicit>No</itunes:explicit>
    </item>
    <item>
      <title>Ashley Casovan: There are tools to help governments and companies reduce bias in AI</title>
      <itunes:episode>12</itunes:episode>
      <podcast:episode>12</podcast:episode>
      <itunes:title>Ashley Casovan: There are tools to help governments and companies reduce bias in AI</itunes:title>
      <itunes:episodeType>full</itunes:episodeType>
      <guid isPermaLink="false">bd64f0da-19d6-4038-b02a-85d19542e240</guid>
      <link>https://share.transistor.fm/s/72f0ae90</link>
      <description>
        <![CDATA[In this episode, we interview our friend and colleague, Ashley Casovan, Executive Director of Responsible AI Institute, formerly AI Global, a non-profit dedicated to creating practical tools to ensure the responsible use of AI. Previously, Ashley served as Director of Data and Digital at the Government of Canada, where she led research and policy development related to data, open source, and artificial intelligence. Ashley helps us answer the pressing question in AI: should we rely on internal corporate monitoring, government regulation, third party certification, or some combination? 

Learn more about the Responsible AI Institute: https://www.responsible.ai/
More information on their upcoming work: https://www.responsible.ai/Whitepaper.pdf

----

To learn more about EqualAI, visit our website: https://www.equalai.org/
You can also follow us on Twitter: @ai_equal]]>
      </description>
      <content:encoded>
        <![CDATA[In this episode, we interview our friend and colleague, Ashley Casovan, Executive Director of Responsible AI Institute, formerly AI Global, a non-profit dedicated to creating practical tools to ensure the responsible use of AI. Previously, Ashley served as Director of Data and Digital at the Government of Canada, where she led research and policy development related to data, open source, and artificial intelligence. Ashley helps us answer the pressing question in AI: should we rely on internal corporate monitoring, government regulation, third party certification, or some combination? 

Learn more about the Responsible AI Institute: https://www.responsible.ai/
More information on their upcoming work: https://www.responsible.ai/Whitepaper.pdf

----

To learn more about EqualAI, visit our website: https://www.equalai.org/
You can also follow us on Twitter: @ai_equal]]>
      </content:encoded>
      <pubDate>Tue, 04 May 2021 10:49:42 -0400</pubDate>
      <author>Miriam Vogel</author>
      <enclosure url="https://media.transistor.fm/72f0ae90/47da7659.mp3" length="36433925" type="audio/mpeg"/>
      <itunes:author>Miriam Vogel</itunes:author>
      <itunes:duration>2269</itunes:duration>
      <itunes:summary>In this episode, we interview our friend and colleague, Ashley Casovan, Executive Director of Responsible AI Institute, formerly AI Global, a non-profit dedicated to creating practical tools to ensure the responsible use of AI. Previously, Ashley served as Director of Data and Digital at the Government of Canada, where she led research and policy development related to data, open source, and artificial intelligence. Ashley helps us answer the pressing question in AI: should we rely on internal corporate monitoring, government regulation, third party certification, or some combination? 

Learn more about the Responsible AI Institute: https://www.responsible.ai/
More information on their upcoming work: https://www.responsible.ai/Whitepaper.pdf

----

To learn more about EqualAI, visit our website: https://www.equalai.org/
You can also follow us on Twitter: @ai_equal</itunes:summary>
      <itunes:subtitle>In this episode, we interview our friend and colleague, Ashley Casovan, Executive Director of Responsible AI Institute, formerly AI Global, a non-profit dedicated to creating practical tools to ensure the responsible use of AI. Previously, Ashley served a</itunes:subtitle>
      <itunes:keywords>technology, AI Governance, AI, Tech, Artificial Intelligence, AI literacy, AI Policy</itunes:keywords>
      <itunes:explicit>No</itunes:explicit>
    </item>
    <item>
      <title>Kush Varshney: Can we trust AI?</title>
      <itunes:episode>11</itunes:episode>
      <podcast:episode>11</podcast:episode>
      <itunes:title>Kush Varshney: Can we trust AI?</itunes:title>
      <itunes:episodeType>full</itunes:episodeType>
      <guid isPermaLink="false">2f8df7c0-5568-482e-8c40-c99ad74d7cc1</guid>
      <link>https://share.transistor.fm/s/a451b21f</link>
      <description>
        <![CDATA[Dr. Kush R. Varshney is a distinguished research staff member and manager in IBM Research AI at the Thomas J. Watson Research Center where he has conducted cutting edge AI and Machine Learning research for the past ten years. Varshney also serves as co-director of IBM’s Science for Social Good program. Varshney received both a Master of Science and a Ph.D. in Electrical Engineering and Computer Science from MIT. In addition to writing numerous articles on AI, Varshney helped develop AI Fairness 360, a comprehensive open-source toolkit of metrics to check for unwanted bias in datasets and machine learning models. Varshney recently released his book Trust in Machine Learning.

----

To learn more, visit our website: https://www.equalai.org/
You can also follow us on Twitter: @ai_equal]]>
      </description>
      <content:encoded>
        <![CDATA[Dr. Kush R. Varshney is a distinguished research staff member and manager in IBM Research AI at the Thomas J. Watson Research Center where he has conducted cutting edge AI and Machine Learning research for the past ten years. Varshney also serves as co-director of IBM’s Science for Social Good program. Varshney received both a Master of Science and a Ph.D. in Electrical Engineering and Computer Science from MIT. In addition to writing numerous articles on AI, Varshney helped develop AI Fairness 360, a comprehensive open-source toolkit of metrics to check for unwanted bias in datasets and machine learning models. Varshney recently released his book Trust in Machine Learning.

----

To learn more, visit our website: https://www.equalai.org/
You can also follow us on Twitter: @ai_equal]]>
      </content:encoded>
      <pubDate>Tue, 27 Apr 2021 09:18:13 -0400</pubDate>
      <author>Miriam Vogel</author>
      <enclosure url="https://media.transistor.fm/a451b21f/b549e349.mp3" length="36121751" type="audio/mpeg"/>
      <itunes:author>Miriam Vogel</itunes:author>
      <itunes:duration>2253</itunes:duration>
      <itunes:summary>Dr. Kush R. Varshney is a distinguished research staff member and manager in IBM Research AI at the Thomas J. Watson Research Center where he has conducted cutting edge AI and Machine Learning research for the past ten years. Varshney also serves as co-director of IBM’s Science for Social Good program. Varshney received both a Master of Science and a Ph.D. in Electrical Engineering and Computer Science from MIT. In addition to writing numerous articles on AI, Varshney helped develop AI Fairness 360, a comprehensive open-source toolkit of metrics to check for unwanted bias in datasets and machine learning models. Varshney recently released his book Trust in Machine Learning.

----

To learn more, visit our website: https://www.equalai.org/
You can also follow us on Twitter: @ai_equal</itunes:summary>
      <itunes:subtitle>Dr. Kush R. Varshney is a distinguished research staff member and manager in IBM Research AI at the Thomas J. Watson Research Center where he has conducted cutting edge AI and Machine Learning research for the past ten years. Varshney also serves as co-di</itunes:subtitle>
      <itunes:keywords>technology, AI Governance, AI, Tech, Artificial Intelligence, AI literacy, AI Policy</itunes:keywords>
      <itunes:explicit>No</itunes:explicit>
    </item>
    <item>
      <title>Robert LoCascio: Why I co-founded EqualAI</title>
      <itunes:episode>10</itunes:episode>
      <podcast:episode>10</podcast:episode>
      <itunes:title>Robert LoCascio: Why I co-founded EqualAI</itunes:title>
      <itunes:episodeType>full</itunes:episodeType>
      <guid isPermaLink="false">e9f87dc0-48e5-4d52-8167-218232d767e5</guid>
      <link>https://share.transistor.fm/s/523c1873</link>
      <description>
        <![CDATA[Rob LoCascio is the founder of LivePerson, Inc. and has been its chief executive officer since its inception in 1995, making him one of the longest-standing founding CEOs of a tech company today. As the inventor of online chat for brands, Rob disrupted the way people communicate with companies around the world. He is a founding member of EqualAI, which works with companies, policy makers, and experts to reduce bias in AI, and the NYC Entrepreneurs Council of the Partnership for New York City. In 2001, Rob started the Dream Big Foundation with its first program, FeedingNYC. As someone who has been on the entrepreneurial journey for over two decades, Rob's mission is to inspire and help others who are on that same path.

----

To learn more, visit our website: https://www.equalai.org/
You can also follow us on Twitter: @ai_equal]]>
      </description>
      <content:encoded>
        <![CDATA[Rob LoCascio is the founder of LivePerson, Inc. and has been its chief executive officer since its inception in 1995, making him one of the longest-standing founding CEOs of a tech company today. As the inventor of online chat for brands, Rob disrupted the way people communicate with companies around the world. He is a founding member of EqualAI, which works with companies, policy makers, and experts to reduce bias in AI, and the NYC Entrepreneurs Council of the Partnership for New York City. In 2001, Rob started the Dream Big Foundation with its first program, FeedingNYC. As someone who has been on the entrepreneurial journey for over two decades, Rob's mission is to inspire and help others who are on that same path.

----

To learn more, visit our website: https://www.equalai.org/
You can also follow us on Twitter: @ai_equal]]>
      </content:encoded>
      <pubDate>Tue, 20 Apr 2021 10:50:11 -0400</pubDate>
      <author>Miriam Vogel</author>
      <enclosure url="https://media.transistor.fm/523c1873/f415156b.mp3" length="31200473" type="audio/mpeg"/>
      <itunes:author>Miriam Vogel</itunes:author>
      <itunes:duration>1946</itunes:duration>
      <itunes:summary>Rob LoCascio is the founder of LivePerson, Inc. and has been its chief executive officer since its inception in 1995, making him one of the longest-standing founding CEOs of a tech company today. As the inventor of online chat for brands, Rob disrupted the way people communicate with companies around the world. He is a founding member of EqualAI, which works with companies, policy makers, and experts to reduce bias in AI, and the NYC Entrepreneurs Council of the Partnership for New York City. In 2001, Rob started the Dream Big Foundation with its first program, FeedingNYC. As someone who has been on the entrepreneurial journey for over two decades, Rob's mission is to inspire and help others who are on that same path.

----

To learn more, visit our website: https://www.equalai.org/
You can also follow us on Twitter: @ai_equal</itunes:summary>
      <itunes:subtitle>Rob LoCascio is the founder of LivePerson, Inc. and has been its chief executive officer since its inception in 1995, making him one of the longest-standing founding CEOs of a tech company today. As the inventor of online chat for brands, Rob disrupted th</itunes:subtitle>
      <itunes:keywords>technology, AI Governance, AI, Tech, Artificial Intelligence, AI literacy, AI Policy</itunes:keywords>
      <itunes:explicit>No</itunes:explicit>
    </item>
    <item>
      <title>Malcolm Frank: How to advise your clients to successfully and responsibly navigate the digital age</title>
      <itunes:episode>9</itunes:episode>
      <podcast:episode>9</podcast:episode>
      <itunes:title>Malcolm Frank: How to advise your clients to successfully and responsibly navigate the digital age</itunes:title>
      <itunes:episodeType>full</itunes:episodeType>
      <guid isPermaLink="false">5f7b5534-c2d5-4a17-b746-55d5dce70f7b</guid>
      <link>https://share.transistor.fm/s/a607d267</link>
      <description>
        <![CDATA[Malcolm Frank is the president of digital business and technology at Cognizant. Malcolm’s influence is wide ranging and evident across media. He has co-authored two best-selling books, “What to Do When Machines Do Everything” (2017) and “Code Halos” (2014) and authored numerous white papers focusing on the Future of Work. A highly sought-after speaker, Malcolm has presented at conclaves across the globe, including the World Economic Forum and the South by Southwest (SXSW) Conference. He is frequently quoted, is the subject of a Harvard Business School case study and was named “one of the most influential people in finance” by Risk Management magazine.

On this episode, we ask Malcolm how companies should navigate the digital age responsibly, with an eye toward increasing use of AI while reducing liability and harms.

----

To learn more, visit our website: https://www.equalai.org/
You can also follow us on Twitter: @ai_equal]]>
      </description>
      <content:encoded>
        <![CDATA[Malcolm Frank is the president of digital business and technology at Cognizant. Malcolm’s influence is wide ranging and evident across media. He has co-authored two best-selling books, “What to Do When Machines Do Everything” (2017) and “Code Halos” (2014) and authored numerous white papers focusing on the Future of Work. A highly sought-after speaker, Malcolm has presented at conclaves across the globe, including the World Economic Forum and the South by Southwest (SXSW) Conference. He is frequently quoted, is the subject of a Harvard Business School case study and was named “one of the most influential people in finance” by Risk Management magazine.

On this episode, we ask Malcolm how companies should navigate the digital age responsibly, with an eye toward increasing use of AI while reducing liability and harms.

----

To learn more, visit our website: https://www.equalai.org/
You can also follow us on Twitter: @ai_equal]]>
      </content:encoded>
      <pubDate>Tue, 13 Apr 2021 13:58:59 -0400</pubDate>
      <author>Miriam Vogel</author>
      <enclosure url="https://media.transistor.fm/a607d267/380a06c8.mp3" length="35858035" type="audio/mpeg"/>
      <itunes:author>Miriam Vogel</itunes:author>
      <itunes:duration>2237</itunes:duration>
      <itunes:summary>Malcolm Frank is the president of digital business and technology at Cognizant. Malcolm’s influence is wide ranging and evident across media. He has co-authored two best-selling books, “What to Do When Machines Do Everything” (2017) and “Code Halos” (2014) and authored numerous white papers focusing on the Future of Work. A highly sought-after speaker, Malcolm has presented at conclaves across the globe, including the World Economic Forum and the South by Southwest (SXSW) Conference. He is frequently quoted, is the subject of a Harvard Business School case study and was named “one of the most influential people in finance” by Risk Management magazine.

On this episode, we ask Malcolm how companies should navigate the digital age responsibly, with an eye toward increasing use of AI while reducing liability and harms.

----

To learn more, visit our website: https://www.equalai.org/
You can also follow us on Twitter: @ai_equal</itunes:summary>
      <itunes:subtitle>Malcolm Frank is the president of digital business and technology at Cognizant. Malcolm’s influence is wide ranging and evident across media. He has co-authored two best-selling books, “What to Do When Machines Do Everything” (2017) and “Code Halos” (2014</itunes:subtitle>
      <itunes:keywords>technology, AI Governance, AI, Tech, Artificial Intelligence, AI literacy, AI Policy</itunes:keywords>
      <itunes:explicit>No</itunes:explicit>
    </item>
    <item>
      <title>Christo Wilson: What is an algorithmic audit?</title>
      <itunes:episode>8</itunes:episode>
      <podcast:episode>8</podcast:episode>
      <itunes:title>Christo Wilson: What is an algorithmic audit?</itunes:title>
      <itunes:episodeType>full</itunes:episodeType>
      <guid isPermaLink="false">2fe385d7-07c7-4d2e-94b2-c606246294fd</guid>
      <link>https://share.transistor.fm/s/5f4c2033</link>
      <description>
        <![CDATA[Christo is an Associate Professor in the Khoury College of Computer Sciences at Northeastern University, a member of the Cybersecurity and Privacy Institute and the Director of the BS in Cybersecurity program in the College. He is a faculty associate at the Berkman Klein Center for Internet &amp; Society at Harvard University, and an affiliate member of the Center for Law, Innovation and Creativity at Northeastern University School of Law. His research investigates the sociotechnical systems that shape our lives using a multi-disciplinary approach. You can find more of his talks and cutting edge research here: https://cbw.sh/ 

----
To learn more, visit our website: https://www.equalai.org/
You can also follow us on Twitter: @ai_equal]]>
      </description>
      <content:encoded>
        <![CDATA[Christo is an Associate Professor in the Khoury College of Computer Sciences at Northeastern University, a member of the Cybersecurity and Privacy Institute and the Director of the BS in Cybersecurity program in the College. He is a faculty associate at the Berkman Klein Center for Internet &amp; Society at Harvard University, and an affiliate member of the Center for Law, Innovation and Creativity at Northeastern University School of Law. His research investigates the sociotechnical systems that shape our lives using a multi-disciplinary approach. You can find more of his talks and cutting edge research here: https://cbw.sh/ 

----
To learn more, visit our website: https://www.equalai.org/
You can also follow us on Twitter: @ai_equal]]>
      </content:encoded>
      <pubDate>Wed, 07 Apr 2021 15:13:09 -0400</pubDate>
      <author>Miriam Vogel</author>
      <enclosure url="https://media.transistor.fm/5f4c2033/f4d89ba3.mp3" length="27484770" type="audio/mpeg"/>
      <itunes:author>Miriam Vogel</itunes:author>
      <itunes:duration>1714</itunes:duration>
      <itunes:summary>Christo is an Associate Professor in the Khoury College of Computer Sciences at Northeastern University, a member of the Cybersecurity and Privacy Institute and the Director of the BS in Cybersecurity program in the College. He is a faculty associate at the Berkman Klein Center for Internet &amp; Society at Harvard University, and an affiliate member of the Center for Law, Innovation and Creativity at Northeastern University School of Law. His research investigates the sociotechnical systems that shape our lives using a multi-disciplinary approach. You can find more of his talks and cutting edge research here: https://cbw.sh/ 

----
To learn more, visit our website: https://www.equalai.org/
You can also follow us on Twitter: @ai_equal</itunes:summary>
      <itunes:subtitle>Christo is an Associate Professor in the Khoury College of Computer Sciences at Northeastern University, a member of the Cybersecurity and Privacy Institute and the Director of the BS in Cybersecurity program in the College. He is a faculty associate at t</itunes:subtitle>
      <itunes:keywords>technology, AI Governance, AI, Tech, Artificial Intelligence, AI literacy, AI Policy</itunes:keywords>
      <itunes:explicit>No</itunes:explicit>
    </item>
    <item>
      <title>Judy Spitz: Do we have a pipeline problem?</title>
      <itunes:episode>7</itunes:episode>
      <podcast:episode>7</podcast:episode>
      <itunes:title>Judy Spitz: Do we have a pipeline problem?</itunes:title>
      <itunes:episodeType>full</itunes:episodeType>
      <guid isPermaLink="false">11d819e7-cba1-4297-b5f9-d868c442c1aa</guid>
      <link>https://share.transistor.fm/s/d2a9c2bb</link>
      <description>
        <![CDATA[Tune in to this week’s episode of "In AI we Trust?" to hear Dr. Judith Spitz, Founder and Executive Director of Break Through Tech and learn the often missed barrier she identified to getting women into tech and how she fixes it with her organization (hint: look in our own backyards).

Dr. Spitz was previously Chief Information Officer (CIO) of Verizon, and in 2016, devoted herself to helping women break into tech. She launched WiTNY, or the Women in Technology and Entrepreneurship in New York Initiative, and saw a 94 percent increase in the number of women graduating with computer science degrees from CUNY (City University of New York), their academic partner. In 2020, she founded Break Through Tech, which is building on WiTNY’s success in cities around the country, starting with Chicago.

----
To learn more, visit our website: https://www.equalai.org/
You can also follow us on Twitter: @ai_equal]]>
      </description>
      <content:encoded>
        <![CDATA[Tune in to this week’s episode of "In AI we Trust?" to hear Dr. Judith Spitz, Founder and Executive Director of Break Through Tech and learn the often missed barrier she identified to getting women into tech and how she fixes it with her organization (hint: look in our own backyards).

Dr. Spitz was previously Chief Information Officer (CIO) of Verizon, and in 2016, devoted herself to helping women break into tech. She launched WiTNY, or the Women in Technology and Entrepreneurship in New York Initiative, and saw a 94 percent increase in the number of women graduating with computer science degrees from CUNY (City University of New York), their academic partner. In 2020, she founded Break Through Tech, which is building on WiTNY’s success in cities around the country, starting with Chicago.

----
To learn more, visit our website: https://www.equalai.org/
You can also follow us on Twitter: @ai_equal]]>
      </content:encoded>
      <pubDate>Tue, 30 Mar 2021 13:49:53 -0400</pubDate>
      <author>Miriam Vogel</author>
      <enclosure url="https://media.transistor.fm/d2a9c2bb/2b64568c.mp3" length="42128929" type="audio/mpeg"/>
      <itunes:author>Miriam Vogel</itunes:author>
      <itunes:duration>2628</itunes:duration>
      <itunes:summary>Tune in to this week’s episode of "In AI we Trust?" to hear Dr. Judith Spitz, Founder and Executive Director of Break Through Tech and learn the often missed barrier she identified to getting women into tech and how she fixes it with her organization (hint: look in our own backyards).

Dr. Spitz was previously Chief Information Officer (CIO) of Verizon, and in 2016, devoted herself to helping women break into tech. She launched WiTNY, or the Women in Technology and Entrepreneurship in New York Initiative, and saw a 94 percent increase in the number of women graduating with computer science degrees from CUNY (City University of New York), their academic partner. In 2020, she founded Break Through Tech, which is building on WiTNY’s success in cities around the country, starting with Chicago.

----
To learn more, visit our website: https://www.equalai.org/
You can also follow us on Twitter: @ai_equal</itunes:summary>
      <itunes:subtitle>Tune in to this week’s episode of "In AI we Trust?" to hear Dr. Judith Spitz, Founder and Executive Director of Break Through Tech and learn the often missed barrier she identified to getting women into tech and how she fixes it with her organization (hin</itunes:subtitle>
      <itunes:keywords>technology, AI Governance, AI, Tech, Artificial Intelligence, AI literacy, AI Policy</itunes:keywords>
      <itunes:explicit>No</itunes:explicit>
    </item>
    <item>
      <title>Kathy Baxter: What to do before launching an ethical AI product</title>
      <itunes:episode>6</itunes:episode>
      <podcast:episode>6</podcast:episode>
      <itunes:title>Kathy Baxter: What to do before launching an ethical AI product</itunes:title>
      <itunes:episodeType>full</itunes:episodeType>
      <guid isPermaLink="false">be9de8d0-f6ca-4053-a363-78ff7d2293f9</guid>
      <link>https://share.transistor.fm/s/7b431ced</link>
      <description>
        <![CDATA[Kathy Baxter is the principal architect of Ethical AI Practice at Salesforce. She develops research-informed best practices to educate Salesforce employees, customers, and the industry on the development of responsible AI. Prior to Salesforce, she worked at Google, eBay, and Oracle in User Experience Research. 

On this episode, we ask Kathy: What are the critical steps to take from an ethics perspective, to ensure your AI product is safe to launch?

----
To learn more, visit our website: https://www.equalai.org/
You can also follow us on Twitter: @ai_equal]]>
      </description>
      <content:encoded>
        <![CDATA[Kathy Baxter is the principal architect of Ethical AI Practice at Salesforce. She develops research-informed best practices to educate Salesforce employees, customers, and the industry on the development of responsible AI. Prior to Salesforce, she worked at Google, eBay, and Oracle in User Experience Research. 

On this episode, we ask Kathy: What are the critical steps to take from an ethics perspective, to ensure your AI product is safe to launch?

----
To learn more, visit our website: https://www.equalai.org/
You can also follow us on Twitter: @ai_equal]]>
      </content:encoded>
      <pubDate>Fri, 26 Mar 2021 15:54:11 -0400</pubDate>
      <author>Miriam Vogel</author>
      <enclosure url="https://media.transistor.fm/7b431ced/07645b4b.mp3" length="27158657" type="audio/mpeg"/>
      <itunes:author>Miriam Vogel</itunes:author>
      <itunes:duration>1694</itunes:duration>
      <itunes:summary>Kathy Baxter is the principal architect of Ethical AI Practice at Salesforce. She develops research-informed best practices to educate Salesforce employees, customers, and the industry on the development of responsible AI. Prior to Salesforce, she worked at Google, eBay, and Oracle in User Experience Research. 

On this episode, we ask Kathy: What are the critical steps to take from an ethics perspective, to ensure your AI product is safe to launch?

----
To learn more, visit our website: https://www.equalai.org/
You can also follow us on Twitter: @ai_equal</itunes:summary>
      <itunes:subtitle>Kathy Baxter is the principal architect of Ethical AI Practice at Salesforce. She develops research-informed best practices to educate Salesforce employees, customers, and the industry on the development of responsible AI. Prior to Salesforce, she worked </itunes:subtitle>
      <itunes:keywords>technology, AI Governance, AI, Tech, Artificial Intelligence, AI literacy, AI Policy</itunes:keywords>
      <itunes:explicit>No</itunes:explicit>
    </item>
    <item>
      <title>BONUS with Rep. Yvette Clarke and Roger McNamee: "In AI We Trust?" Podcast Launch Hosted by the Georgetown University Law Center</title>
      <itunes:episode>5</itunes:episode>
      <podcast:episode>5</podcast:episode>
      <itunes:title>BONUS with Rep. Yvette Clarke and Roger McNamee: "In AI We Trust?" Podcast Launch Hosted by the Georgetown University Law Center</itunes:title>
      <itunes:episodeType>bonus</itunes:episodeType>
      <guid isPermaLink="false">214a9043-71e2-4900-9a98-db9460489759</guid>
      <link>https://share.transistor.fm/s/ca408967</link>
      <description>
        <![CDATA[We're excited to share this bonus episode, a recording from the podcast's launch event with special guests Roger McNamee and Congresswoman Yvette Clarke. Representing New York's 9th District, Congresswoman Clarke is a committed champion of fighting bias in AI and other forms of discrimination in tech. Roger McNamee is a longtime investor in tech and author of "Zucked," which sheds light on the dangers of tech that is unfettered and insufficiently regulated.

A special thanks to our friends at the Georgetown University Law Center for hosting this event.

----
To learn more, visit our website: https://www.equalai.org/
You can also follow us on Twitter: @ai_equal]]>
      </description>
      <content:encoded>
        <![CDATA[We're excited to share this bonus episode, a recording from the podcast's launch event with special guests Roger McNamee and Congresswoman Yvette Clarke. Representing New York's 9th District, Congresswoman Clarke is a committed champion of fighting bias in AI and other forms of discrimination in tech. Roger McNamee is a longtime investor in tech and author of "Zucked," which sheds light on the dangers of tech that is unfettered and insufficiently regulated.

A special thanks to our friends at the Georgetown University Law Center for hosting this event.

----
To learn more, visit our website: https://www.equalai.org/
You can also follow us on Twitter: @ai_equal]]>
      </content:encoded>
      <pubDate>Wed, 24 Mar 2021 11:55:23 -0400</pubDate>
      <author>Miriam Vogel</author>
      <enclosure url="https://media.transistor.fm/ca408967/298097a1.mp3" length="69609366" type="audio/mpeg"/>
      <itunes:author>Miriam Vogel</itunes:author>
      <itunes:duration>4347</itunes:duration>
      <itunes:summary>We're excited to share this bonus episode, a recording from the podcast's launch event with special guests Roger McNamee and Congresswoman Yvette Clarke. Representing New York's 9th District, Congresswoman Clarke is a committed champion of fighting bias in AI and other forms of discrimination in tech. Roger McNamee is a longtime investor in tech and author of "Zucked," which sheds light on the dangers of tech that is unfettered and insufficiently regulated.

A special thanks to our friends at the Georgetown University Law Center for hosting this event.

----
To learn more, visit our website: https://www.equalai.org/
You can also follow us on Twitter: @ai_equal</itunes:summary>
      <itunes:subtitle>We're excited to share this bonus episode, a recording from the podcast's launch event with special guests Roger McNamee and Congresswoman Yvette Clarke. Representing New York's 9th District, Congresswoman Clarke is a committed champion of fighting bias i</itunes:subtitle>
      <itunes:keywords>technology, AI Governance, AI, Tech, Artificial Intelligence, AI literacy, AI Policy</itunes:keywords>
      <itunes:explicit>No</itunes:explicit>
    </item>
    <item>
      <title>Meredith Broussard: What makes unfettered AI so dangerous (and what can we do about it)?</title>
      <itunes:episode>4</itunes:episode>
      <podcast:episode>4</podcast:episode>
      <itunes:title>Meredith Broussard: What makes unfettered AI so dangerous (and what can we do about it)?</itunes:title>
      <itunes:episodeType>full</itunes:episodeType>
      <guid isPermaLink="false">8122eb67-9ca6-4eb1-8bec-43bf22f20bdd</guid>
      <link>https://share.transistor.fm/s/a243ba1e</link>
      <description>
        <![CDATA[Meredith Broussard is a computer scientist and data journalism professor at NYU. Her book, "Artificial Unintelligence: How Computers Misunderstand the World," explains the origins of AI and the subtle and not-so-subtle ways that women and people of color were excluded from its genesis.

On this episode, we ask Meredith: What makes unfettered AI so dangerous, and what can we do about it?
----
To learn more, visit our website: https://www.equalai.org/
You can also follow us on Twitter: @ai_equal]]>
      </description>
      <content:encoded>
        <![CDATA[Meredith Broussard is a computer scientist and data journalism professor at NYU. Her book, "Artificial Unintelligence: How Computers Misunderstand the World," explains the origins of AI and the subtle and not-so-subtle ways that women and people of color were excluded from its genesis.

On this episode, we ask Meredith: What makes unfettered AI so dangerous, and what can we do about it?
----
To learn more, visit our website: https://www.equalai.org/
You can also follow us on Twitter: @ai_equal]]>
      </content:encoded>
      <pubDate>Tue, 16 Mar 2021 10:52:57 -0400</pubDate>
      <author>Miriam Vogel</author>
      <enclosure url="https://media.transistor.fm/a243ba1e/bc1a54c0.mp3" length="56850590" type="audio/mpeg"/>
      <itunes:author>Miriam Vogel</itunes:author>
      <itunes:duration>2367</itunes:duration>
      <itunes:summary>Meredith Broussard is a computer scientist and data journalism professor at NYU. Her book, "Artificial Unintelligence: How Computers Misunderstand the World," explains the origins of AI and the subtle and not-so-subtle ways that women and people of color were excluded from its genesis.

On this episode, we ask Meredith: What makes unfettered AI so dangerous, and what can we do about it?
----
To learn more, visit our website: https://www.equalai.org/
You can also follow us on Twitter: @ai_equal</itunes:summary>
      <itunes:subtitle>Meredith Broussard is a computer scientist and data journalism professor at NYU. Her book, "Artificial Unintelligence: How Computers Misunderstand the World," explains the origins of AI and the subtle and not-so-subtle ways that women and people of color </itunes:subtitle>
      <itunes:keywords>technology, AI Governance, AI, Tech, Artificial Intelligence, AI literacy, AI Policy</itunes:keywords>
      <itunes:explicit>No</itunes:explicit>
    </item>
    <item>
      <title>Bob Work: America is not prepared to compete in the AI-era</title>
      <itunes:episode>3</itunes:episode>
      <podcast:episode>3</podcast:episode>
      <itunes:title>Bob Work: America is not prepared to compete in the AI-era</itunes:title>
      <itunes:episodeType>full</itunes:episodeType>
      <guid isPermaLink="false">2aa1a205-7e70-477a-a1e2-f3fa75621205</guid>
      <link>https://share.transistor.fm/s/fd6ef8ec</link>
      <description>
        <![CDATA[You can read the NSCAI report here: https://www.nscai.gov/2021-final-report/

Bob Work served as the Deputy Secretary of Defense from 2014 to 2017 and has a long history of service in the government and military before then. He is widely known for developing the Third Offset strategy. He is currently President of TeamWork, a consulting firm that specializes in national security affairs. And even more relevant to this discussion - he is the Vice-Chair of the National Security Commission on AI.
----
To learn more, visit our website: https://www.equalai.org/
You can also follow us on Twitter: @ai_equal]]>
      </description>
      <content:encoded>
        <![CDATA[You can read the NSCAI report here: https://www.nscai.gov/2021-final-report/

Bob Work served as the Deputy Secretary of Defense from 2014 to 2017 and has a long history of service in the government and military before then. He is widely known for developing the Third Offset strategy. He is currently President of TeamWork, a consulting firm that specializes in national security affairs. And even more relevant to this discussion - he is the Vice-Chair of the National Security Commission on AI.
----
To learn more, visit our website: https://www.equalai.org/
You can also follow us on Twitter: @ai_equal]]>
      </content:encoded>
      <pubDate>Thu, 11 Mar 2021 08:00:00 -0500</pubDate>
      <author>Miriam Vogel</author>
      <enclosure url="https://media.transistor.fm/fd6ef8ec/faf916e9.mp3" length="48517578" type="audio/mpeg"/>
      <itunes:author>Miriam Vogel</itunes:author>
      <itunes:duration>2019</itunes:duration>
      <itunes:summary>You can read the NSCAI report here: https://www.nscai.gov/2021-final-report/

Bob Work served as the Deputy Secretary of Defense from 2014 to 2017 and has a long history of service in the government and military before then. He is widely known for developing the Third Offset strategy. He is currently President of TeamWork, a consulting firm that specializes in national security affairs. And even more relevant to this discussion - he is the Vice-Chair of the National Security Commission on AI.
----
To learn more, visit our website: https://www.equalai.org/
You can also follow us on Twitter: @ai_equal</itunes:summary>
      <itunes:subtitle>You can read the NSCAI report here: https://www.nscai.gov/2021-final-report/

Bob Work served as the Deputy Secretary of Defense from 2014 to 2017 and has a long history of service in the government and military before then. He is widely known for devel</itunes:subtitle>
      <itunes:keywords>technology, AI Governance, AI, Tech, Artificial Intelligence, AI literacy, AI Policy</itunes:keywords>
      <itunes:explicit>No</itunes:explicit>
    </item>
    <item>
      <title>Cathy O'Neil: Why should companies care about ethical AI?</title>
      <itunes:episode>2</itunes:episode>
      <podcast:episode>2</podcast:episode>
      <itunes:title>Cathy O'Neil: Why should companies care about ethical AI?</itunes:title>
      <itunes:episodeType>full</itunes:episodeType>
      <guid isPermaLink="false">3d28c224-b028-4109-923f-affc3e0ba7b7</guid>
      <link>https://share.transistor.fm/s/8b4e5092</link>
      <description>
        <![CDATA[In this episode, Miriam and Mark are joined by Dr. Cathy O'Neil, a mathematician, data scientist, and author. She is a matriarch in the exploding and significant field of algorithmic bias. Cathy has a Ph.D. in mathematics from Harvard and taught at MIT and Barnard. She also founded and runs the algorithmic auditing company Orcaa. She also posts on her popular blog, mathbabe.org, which you should all check out, and her Twitter @mathbabedotorg
---
To learn more, visit our website: https://www.equalai.org/
You can also follow us on Twitter: @ai_equal]]>
      </description>
      <content:encoded>
        <![CDATA[In this episode, Miriam and Mark are joined by Dr. Cathy O'Neil, a mathematician, data scientist, and author. She is a matriarch in the exploding and significant field of algorithmic bias. Cathy has a Ph.D. in mathematics from Harvard and taught at MIT and Barnard. She also founded and runs the algorithmic auditing company Orcaa. You can also check out her popular blog, mathbabe.org, and her Twitter @mathbabedotorg
---
To learn more, visit our website: https://www.equalai.org/
You can also follow us on Twitter: @ai_equal]]>
      </content:encoded>
      <pubDate>Tue, 09 Mar 2021 13:00:00 -0500</pubDate>
      <author>Miriam Vogel</author>
      <enclosure url="https://media.transistor.fm/8b4e5092/04be513e.mp3" length="53858675" type="audio/mpeg"/>
      <itunes:author>Miriam Vogel</itunes:author>
      <itunes:duration>2242</itunes:duration>
      <itunes:summary>In this episode, Miriam and Mark are joined by Dr. Cathy O'Neil, a mathematician, data scientist, and author. She is a matriarch in the exploding and significant field of algorithmic bias. Cathy has a Ph.D. in mathematics from Harvard and taught at MIT and Barnard. She also founded and runs the algorithmic auditing company Orcaa. You can also check out her popular blog, mathbabe.org, and her Twitter @mathbabedotorg
---
To learn more, visit our website: https://www.equalai.org/
You can also follow us on Twitter: @ai_equal</itunes:summary>
      <itunes:subtitle>In this episode, Miriam and Mark are joined by Dr. Cathy O'Neil, a mathematician, data scientist, and author. She is a matriarch in the exploding and significant field of algorithmic bias. Cathy has a Ph.D. in mathematics from Harvard and taught at MIT and B</itunes:subtitle>
      <itunes:keywords>technology, AI Governance, AI, Tech, Artificial Intelligence, AI literacy, AI Policy</itunes:keywords>
      <itunes:explicit>No</itunes:explicit>
    </item>
    <item>
      <title>Kurt Campbell: China, International Diplomacy and AI</title>
      <itunes:episode>1</itunes:episode>
      <podcast:episode>1</podcast:episode>
      <itunes:title>Kurt Campbell: China, International Diplomacy and AI</itunes:title>
      <itunes:episodeType>full</itunes:episodeType>
      <guid isPermaLink="false">bda7cffc-dbdb-4271-809b-a14e1d129edd</guid>
      <link>https://share.transistor.fm/s/566506b9</link>
      <description>
        <![CDATA[A farewell episode to EqualAI Advisor, Kurt Campbell asking: How can we apply effective diplomacy strategies to international governance of AI? 

Note: Mark officially joins the podcast next episode!
---
To learn more, visit our website: https://www.equalai.org/

You can also follow us on Twitter: @ai_equal]]>
      </description>
      <content:encoded>
        <![CDATA[A farewell episode to EqualAI Advisor, Kurt Campbell asking: How can we apply effective diplomacy strategies to international governance of AI? 

Note: Mark officially joins the podcast next episode!
---
To learn more, visit our website: https://www.equalai.org/

You can also follow us on Twitter: @ai_equal]]>
      </content:encoded>
      <pubDate>Tue, 09 Mar 2021 09:00:00 -0500</pubDate>
      <author>EqualAI &amp; World Economic Forum</author>
      <enclosure url="https://media.transistor.fm/566506b9/ecdeb187.mp3" length="62079674" type="audio/mpeg"/>
      <itunes:author>EqualAI &amp; World Economic Forum</itunes:author>
      <itunes:duration>2584</itunes:duration>
      <itunes:summary>A farewell episode to EqualAI Advisor, Kurt Campbell asking: How can we apply effective diplomacy strategies to international governance of AI? 

Note: Mark officially joins the podcast next episode!
---
To learn more, visit our website: https://www.equalai.org/

You can also follow us on Twitter: @ai_equal</itunes:summary>
      <itunes:subtitle>A farewell episode to EqualAI Advisor, Kurt Campbell asking: How can we apply effective diplomacy strategies to international governance of AI? 

Note: Mark officially joins the podcast next episode!
---
To learn more, visit our website: https://www.e</itunes:subtitle>
      <itunes:keywords>technology, AI Governance, AI, Tech, Artificial Intelligence, AI literacy, AI Policy</itunes:keywords>
      <itunes:explicit>No</itunes:explicit>
    </item>
    <item>
      <title>Welcome to In AI We Trust?</title>
      <itunes:title>Welcome to In AI We Trust?</itunes:title>
      <itunes:episodeType>trailer</itunes:episodeType>
      <guid isPermaLink="false">918e239c-6808-4fe0-b925-5a81f0d431fe</guid>
      <link>https://share.transistor.fm/s/d07b60b9</link>
      <description>
        <![CDATA[<p><strong>The EqualAI podcast launches March 10th, subscribe wherever you get your podcasts so you don't miss out!</strong></p><p>To learn more, visit our website:<a href="https://www.equalai.org/"> https://www.equalai.org/</a></p><p>You can also follow us on Twitter: <a href="https://twitter.com/ai_equal">@ai_equal</a></p>]]>
      </description>
      <content:encoded>
        <![CDATA[<p><strong>The EqualAI podcast launches March 10th, subscribe wherever you get your podcasts so you don't miss out!</strong></p><p>To learn more, visit our website:<a href="https://www.equalai.org/"> https://www.equalai.org/</a></p><p>You can also follow us on Twitter: <a href="https://twitter.com/ai_equal">@ai_equal</a></p>]]>
      </content:encoded>
      <pubDate>Tue, 02 Mar 2021 14:31:45 -0500</pubDate>
      <author>EqualAI &amp; World Economic Forum</author>
      <enclosure url="https://media.transistor.fm/d07b60b9/780d7f19.mp3" length="3306645" type="audio/mpeg"/>
      <itunes:author>EqualAI &amp; World Economic Forum</itunes:author>
      <itunes:duration>136</itunes:duration>
      <itunes:summary>Here's a preview of our new podcast launching on March 10th.</itunes:summary>
      <itunes:subtitle>Here's a preview of our new podcast launching on March 10th.</itunes:subtitle>
      <itunes:keywords>technology, ethics, AI, artificial intelligence</itunes:keywords>
      <itunes:explicit>No</itunes:explicit>
    </item>
  </channel>
</rss>
