<?xml version="1.0" encoding="UTF-8"?>
<?xml-stylesheet href="/stylesheet.xsl" type="text/xsl"?>
<rss version="2.0" xmlns:dc="http://purl.org/dc/elements/1.1/" xmlns:atom="http://www.w3.org/2005/Atom" xmlns:sy="http://purl.org/rss/1.0/modules/syndication/" xmlns:content="http://purl.org/rss/1.0/modules/content/" xmlns:itunes="http://www.itunes.com/dtds/podcast-1.0.dtd" xmlns:podcast="https://podcastindex.org/namespace/1.0">
  <channel>
    <atom:link rel="self" type="application/rss+xml" href="https://feeds.transistor.fm/impact-ai" title="MP3 Audio"/>
    <atom:link rel="hub" href="https://pubsubhubbub.appspot.com/"/>
    <podcast:podping usesPodping="true"/>
    <title>Impact AI</title>
    <generator>Transistor (https://transistor.fm)</generator>
    <itunes:new-feed-url>https://feeds.transistor.fm/impact-ai</itunes:new-feed-url>
    <description>Learn how to build a mission-driven machine learning company from the innovators and entrepreneurs who are leading the way. A weekly show about the intersection of ML and business – particularly startups. We discuss the challenges and best practices for working with data, mitigating bias, dealing with regulatory processes, collaborating across disciplines, recruiting and onboarding, maximizing impact, and more.</description>
    <copyright>© 2023 Pixel Scientia Labs, LLC</copyright>
    <podcast:guid>eee0e0f4-7914-5ba5-b2a5-e2234b365707</podcast:guid>
    <podcast:locked owner="heather@pixelscientia.com">no</podcast:locked>
    <podcast:trailer pubdate="Tue, 04 Oct 2022 22:42:21 -0400" url="https://2.gum.fm/op3.dev/e/pdcn.co/e/pdst.fm/e/dts.podtrac.com/redirect.mp3/media.transistor.fm/1e71a21e/09c8c2c8.mp3" length="1999494" type="audio/mpeg">Welcome to Impact AI</podcast:trailer>
    <language>en</language>
    <pubDate>Mon, 16 Jun 2025 06:00:04 -0400</pubDate>
    <lastBuildDate>Tue, 02 Dec 2025 19:50:09 -0500</lastBuildDate>
    <link>http://pixelscientia.com/podcast</link>
    <image>
      <url>https://img.transistor.fm/gWRL6rl3h46Hm6TBosou6oNRky8r3Z6MqIs9LIZmh8s/rs:fill:0:0:1/w:1400/h:1400/q:60/mb:500000/aHR0cHM6Ly9pbWct/dXBsb2FkLXByb2R1/Y3Rpb24udHJhbnNp/c3Rvci5mbS9zaG93/LzM1MDE3LzE2NjQ5/MjYwMjMtYXJ0d29y/ay5qcGc.jpg</url>
      <title>Impact AI</title>
      <link>http://pixelscientia.com/podcast</link>
    </image>
    <itunes:category text="Technology"/>
    <itunes:category text="Business"/>
    <itunes:type>episodic</itunes:type>
    <itunes:author>Heather D. Couture</itunes:author>
    <itunes:image href="https://img.transistor.fm/gWRL6rl3h46Hm6TBosou6oNRky8r3Z6MqIs9LIZmh8s/rs:fill:0:0:1/w:1400/h:1400/q:60/mb:500000/aHR0cHM6Ly9pbWct/dXBsb2FkLXByb2R1/Y3Rpb24udHJhbnNp/c3Rvci5mbS9zaG93/LzM1MDE3LzE2NjQ5/MjYwMjMtYXJ0d29y/ay5qcGc.jpg"/>
    <itunes:summary>Learn how to build a mission-driven machine learning company from the innovators and entrepreneurs who are leading the way. A weekly show about the intersection of ML and business – particularly startups. We discuss the challenges and best practices for working with data, mitigating bias, dealing with regulatory processes, collaborating across disciplines, recruiting and onboarding, maximizing impact, and more.</itunes:summary>
    <itunes:subtitle>Learn how to build a mission-driven machine learning company from the innovators and entrepreneurs who are leading the way.</itunes:subtitle>
    <itunes:keywords>AI, startups, business, machine learning, computer vision, deep learning, impact, sustainability, healthcare, medical</itunes:keywords>
    <itunes:owner>
      <itunes:name>Heather D. Couture</itunes:name>
    </itunes:owner>
    <itunes:complete>No</itunes:complete>
    <itunes:explicit>No</itunes:explicit>
    <item>
      <title>Impact AI Update: Summer Break &amp; Webinar Resources</title>
      <itunes:episode>126</itunes:episode>
      <podcast:episode>126</podcast:episode>
      <itunes:title>Impact AI Update: Summer Break &amp; Webinar Resources</itunes:title>
      <itunes:episodeType>bonus</itunes:episodeType>
      <guid isPermaLink="false">da199d3a-1f8f-4ef4-b891-9edc5f70b69b</guid>
      <link>https://share.transistor.fm/s/87ee3b62</link>
      <description>
        <![CDATA[<p>I will be taking a brief hiatus for the next three months. I’m going to be using this time to step back, reflect, and rework the format of the show to bring you even more valuable insights and engaging conversations. I’m looking forward to returning in the fall with fresh episodes, new guests, and even deeper dives into the challenges and opportunities shaping mission-driven machine learning-powered companies.</p><p>In the meantime, I'm thrilled to share another way you can continue to learn and engage with the world of AI through Pixel Scientia Labs. While the podcast is on pause, I invite you to explore our <strong>Webinar Initiative</strong> at <a href="https://pixelscientia.com/webinars">pixelscientia.com/webinars</a>.</p><p><strong>Links:</strong></p><ul><li><a href="https://pixelscientia.com/webinars/">Webinars from Pixel Scientia</a></li></ul>]]>
      </description>
      <content:encoded>
        <![CDATA[<p>I will be taking a brief hiatus for the next three months. I’m going to be using this time to step back, reflect, and rework the format of the show to bring you even more valuable insights and engaging conversations. I’m looking forward to returning in the fall with fresh episodes, new guests, and even deeper dives into the challenges and opportunities shaping mission-driven machine learning-powered companies.</p><p>In the meantime, I'm thrilled to share another way you can continue to learn and engage with the world of AI through Pixel Scientia Labs. While the podcast is on pause, I invite you to explore our <strong>Webinar Initiative</strong> at <a href="https://pixelscientia.com/webinars">pixelscientia.com/webinars</a>.</p><p><strong>Links:</strong></p><ul><li><a href="https://pixelscientia.com/webinars/">Webinars from Pixel Scientia</a></li></ul>]]>
      </content:encoded>
      <pubDate>Mon, 16 Jun 2025 06:00:00 -0400</pubDate>
      <author>Heather D. Couture</author>
      <enclosure url="https://2.gum.fm/op3.dev/e/pdcn.co/e/pdst.fm/e/dts.podtrac.com/redirect.mp3/media.transistor.fm/87ee3b62/b2ee36a7.mp3" length="2316495" type="audio/mpeg"/>
      <itunes:author>Heather D. Couture</itunes:author>
      <itunes:image href="https://img.transistor.fm/c8_LHDWNkr-E4rMv3WL_IsQqCiZdlC_koVHiuPn53qU/rs:fill:0:0:1/w:1400/h:1400/q:60/mb:500000/aHR0cHM6Ly9pbWct/dXBsb2FkLXByb2R1/Y3Rpb24udHJhbnNp/c3Rvci5mbS81MGFi/MDBjY2E0N2EyOTAw/YmEzNTI3YTMxZDNi/ZjU5NC5qcGc.jpg"/>
      <itunes:duration>143</itunes:duration>
      <itunes:summary>
        <![CDATA[<p>I will be taking a brief hiatus for the next three months. I’m going to be using this time to step back, reflect, and rework the format of the show to bring you even more valuable insights and engaging conversations. I’m looking forward to returning in the fall with fresh episodes, new guests, and even deeper dives into the challenges and opportunities shaping mission-driven machine learning-powered companies.</p><p>In the meantime, I'm thrilled to share another way you can continue to learn and engage with the world of AI through Pixel Scientia Labs. While the podcast is on pause, I invite you to explore our <strong>Webinar Initiative</strong> at <a href="https://pixelscientia.com/webinars">pixelscientia.com/webinars</a>.</p><p><strong>Links:</strong></p><ul><li><a href="https://pixelscientia.com/webinars/">Webinars from Pixel Scientia</a></li></ul>]]>
      </itunes:summary>
      <itunes:keywords>machine learning, Impact AI</itunes:keywords>
      <itunes:explicit>No</itunes:explicit>
      <podcast:transcript url="https://share.transistor.fm/s/87ee3b62/transcript.txt" type="text/plain"/>
    </item>
    <item>
      <title>Advancing Breast Cancer Screening with Nico Karssemeijer from ScreenPoint Medical</title>
      <itunes:episode>125</itunes:episode>
      <podcast:episode>125</podcast:episode>
      <itunes:title>Advancing Breast Cancer Screening with Nico Karssemeijer from ScreenPoint Medical</itunes:title>
      <itunes:episodeType>full</itunes:episodeType>
      <guid isPermaLink="false">ff44d062-2543-43ca-8ad8-fa0d6729713b</guid>
      <link>https://pixelscientia.com/podcast/advancing-breast-cancer-screening-with-nico-karssemeijer-from-screenpoint-medical/</link>
      <description>
        <![CDATA[<p>What role can artificial intelligence play in detecting breast cancer earlier, when it's most treatable? In this episode of Impact AI, we hear from Nico Karssemeijer, Chief Science Officer of ScreenPoint Medical, about how his team is using AI to transform breast cancer screening. Drawing on more than four decades of experience in medical imaging, Nico shares how ScreenPoint’s AI tools assist radiologists by analyzing mammograms, highlighting suspicious areas, and even learning from years of patient data. The conversation explores what it takes to build trustworthy medical AI, overcome challenges with data diversity and device bias, and the importance of clinical validation. To find out how AI is being integrated into real-world healthcare to improve outcomes (and what goes into building a successful AI-powered medical company), tune in today!</p><p><br></p><p><strong>Key Points:</strong></p><ul><li>What led Nico to turn decades of research into a breast imaging AI startup.</li><li>How ScreenPoint uses AI to support radiologists in early detection.</li><li>Challenges of working with diverse data from different imaging devices.</li><li>The importance of training models with clean, representative data.</li><li>Strategies for reducing bias across vendors and populations.</li><li>How independent, real-world validation drives trust and clinical adoption.</li><li>Finding a balance between model accuracy and explainability.</li><li>Why domain expertise is crucial for building a successful AI-powered startup.</li><li>Driving adoption in medical AI through clinical partnerships and rigorous trials.</li></ul><p><br></p><p><strong>Quotes:</strong></p><p>“It’s amazing how much more information you can get out of the mammograms [using AI]. That surprises me all the time.” — Nico Karssemeijer</p><p><br></p><p>“You can't just say, ‘This mammogram is abnormal,’ because then [the radiologists] are puzzled. 
– The algorithm is getting so good that it identifies areas the radiologists would probably not see by themselves. – You have to – mark the area in the exam where a lesion is found.” — Nico Karssemeijer</p><p><br></p><p>“It's incredibly important to have enough domain expertise when you start a company, because it's easy to fail because you don't understand well enough what the customer wants [or] where the field is going.” — Nico Karssemeijer</p><p><br></p><p><strong>Links:</strong></p><p><a href="https://screenpoint-medical.com/management-staff/nico-karssemeijer-2/">Nico Karssemeijer</a></p><p><a href="https://screenpoint-medical.com/">ScreenPoint Medical</a></p><p><a href="https://www.linkedin.com/in/nico-karssemeijer-34685590/">Nico Karssemeijer on LinkedIn</a></p><p><a href="https://scholar.google.com/citations?user=ca7SqLQAAAAJ">Nico Karssemeijer on Google Scholar</a></p><p><br><strong>Resources for Computer Vision Teams:</strong></p><p><a href="https://www.linkedin.com/in/hdcouture/">LinkedIn</a> – Connect with Heather.</p><p><a href="https://pixelscientia.com/newsletter/">Computer Vision Insights Newsletter</a> – A biweekly newsletter to help bring the latest machine learning and computer vision research to applications in people and planetary health.</p><p><a href="http://pixelscientia.com/services/strategy-session/">Computer Vision Strategy Session</a> – Not sure how to advance your computer vision project? Get unstuck with a clear set of next steps. Schedule a 1 hour strategy session now to advance your project.</p>]]>
      </description>
      <content:encoded>
        <![CDATA[<p>What role can artificial intelligence play in detecting breast cancer earlier, when it's most treatable? In this episode of Impact AI, we hear from Nico Karssemeijer, Chief Science Officer of ScreenPoint Medical, about how his team is using AI to transform breast cancer screening. Drawing on more than four decades of experience in medical imaging, Nico shares how ScreenPoint’s AI tools assist radiologists by analyzing mammograms, highlighting suspicious areas, and even learning from years of patient data. The conversation explores what it takes to build trustworthy medical AI, overcome challenges with data diversity and device bias, and the importance of clinical validation. To find out how AI is being integrated into real-world healthcare to improve outcomes (and what goes into building a successful AI-powered medical company), tune in today!</p><p><br></p><p><strong>Key Points:</strong></p><ul><li>What led Nico to turn decades of research into a breast imaging AI startup.</li><li>How ScreenPoint uses AI to support radiologists in early detection.</li><li>Challenges of working with diverse data from different imaging devices.</li><li>The importance of training models with clean, representative data.</li><li>Strategies for reducing bias across vendors and populations.</li><li>How independent, real-world validation drives trust and clinical adoption.</li><li>Finding a balance between model accuracy and explainability.</li><li>Why domain expertise is crucial for building a successful AI-powered startup.</li><li>Driving adoption in medical AI through clinical partnerships and rigorous trials.</li></ul><p><br></p><p><strong>Quotes:</strong></p><p>“It’s amazing how much more information you can get out of the mammograms [using AI]. That surprises me all the time.” — Nico Karssemeijer</p><p><br></p><p>“You can't just say, ‘This mammogram is abnormal,’ because then [the radiologists] are puzzled. 
– The algorithm is getting so good that it identifies areas the radiologists would probably not see by themselves. – You have to – mark the area in the exam where a lesion is found.” — Nico Karssemeijer</p><p><br></p><p>“It's incredibly important to have enough domain expertise when you start a company, because it's easy to fail because you don't understand well enough what the customer wants [or] where the field is going.” — Nico Karssemeijer</p><p><br></p><p><strong>Links:</strong></p><p><a href="https://screenpoint-medical.com/management-staff/nico-karssemeijer-2/">Nico Karssemeijer</a></p><p><a href="https://screenpoint-medical.com/">ScreenPoint Medical</a></p><p><a href="https://www.linkedin.com/in/nico-karssemeijer-34685590/">Nico Karssemeijer on LinkedIn</a></p><p><a href="https://scholar.google.com/citations?user=ca7SqLQAAAAJ">Nico Karssemeijer on Google Scholar</a></p><p><br><strong>Resources for Computer Vision Teams:</strong></p><p><a href="https://www.linkedin.com/in/hdcouture/">LinkedIn</a> – Connect with Heather.</p><p><a href="https://pixelscientia.com/newsletter/">Computer Vision Insights Newsletter</a> – A biweekly newsletter to help bring the latest machine learning and computer vision research to applications in people and planetary health.</p><p><a href="http://pixelscientia.com/services/strategy-session/">Computer Vision Strategy Session</a> – Not sure how to advance your computer vision project? Get unstuck with a clear set of next steps. Schedule a 1 hour strategy session now to advance your project.</p>]]>
      </content:encoded>
      <pubDate>Mon, 02 Jun 2025 06:00:00 -0400</pubDate>
      <author>Heather D. Couture</author>
      <enclosure url="https://2.gum.fm/op3.dev/e/pdcn.co/e/pdst.fm/e/dts.podtrac.com/redirect.mp3/media.transistor.fm/56416973/2412d638.mp3" length="30389448" type="audio/mpeg"/>
      <itunes:author>Heather D. Couture</itunes:author>
      <itunes:image href="https://img.transistor.fm/-tVGE4_WT6l60_MND7jTLhNlBWrWNSTt9mxJpelK8Vg/rs:fill:0:0:1/w:1400/h:1400/q:60/mb:500000/aHR0cHM6Ly9pbWct/dXBsb2FkLXByb2R1/Y3Rpb24udHJhbnNp/c3Rvci5mbS80ODdl/NTU5OGEyNGI1N2I4/OGE2Y2ZmNjI5MDli/MDgwMy5qcGVn.jpg"/>
      <itunes:duration>1264</itunes:duration>
      <itunes:summary>
        <![CDATA[<p>What role can artificial intelligence play in detecting breast cancer earlier, when it's most treatable? In this episode of Impact AI, we hear from Nico Karssemeijer, Chief Science Officer of ScreenPoint Medical, about how his team is using AI to transform breast cancer screening. Drawing on more than four decades of experience in medical imaging, Nico shares how ScreenPoint’s AI tools assist radiologists by analyzing mammograms, highlighting suspicious areas, and even learning from years of patient data. The conversation explores what it takes to build trustworthy medical AI, overcome challenges with data diversity and device bias, and the importance of clinical validation. To find out how AI is being integrated into real-world healthcare to improve outcomes (and what goes into building a successful AI-powered medical company), tune in today!</p><p><br></p><p><strong>Key Points:</strong></p><ul><li>What led Nico to turn decades of research into a breast imaging AI startup.</li><li>How ScreenPoint uses AI to support radiologists in early detection.</li><li>Challenges of working with diverse data from different imaging devices.</li><li>The importance of training models with clean, representative data.</li><li>Strategies for reducing bias across vendors and populations.</li><li>How independent, real-world validation drives trust and clinical adoption.</li><li>Finding a balance between model accuracy and explainability.</li><li>Why domain expertise is crucial for building a successful AI-powered startup.</li><li>Driving adoption in medical AI through clinical partnerships and rigorous trials.</li></ul><p><br></p><p><strong>Quotes:</strong></p><p>“It’s amazing how much more information you can get out of the mammograms [using AI]. That surprises me all the time.” — Nico Karssemeijer</p><p><br></p><p>“You can't just say, ‘This mammogram is abnormal,’ because then [the radiologists] are puzzled. 
– The algorithm is getting so good that it identifies areas the radiologists would probably not see by themselves. – You have to – mark the area in the exam where a lesion is found.” — Nico Karssemeijer</p><p><br></p><p>“It's incredibly important to have enough domain expertise when you start a company, because it's easy to fail because you don't understand well enough what the customer wants [or] where the field is going.” — Nico Karssemeijer</p><p><br></p><p><strong>Links:</strong></p><p><a href="https://screenpoint-medical.com/management-staff/nico-karssemeijer-2/">Nico Karssemeijer</a></p><p><a href="https://screenpoint-medical.com/">ScreenPoint Medical</a></p><p><a href="https://www.linkedin.com/in/nico-karssemeijer-34685590/">Nico Karssemeijer on LinkedIn</a></p><p><a href="https://scholar.google.com/citations?user=ca7SqLQAAAAJ">Nico Karssemeijer on Google Scholar</a></p><p><br><strong>Resources for Computer Vision Teams:</strong></p><p><a href="https://www.linkedin.com/in/hdcouture/">LinkedIn</a> – Connect with Heather.</p><p><a href="https://pixelscientia.com/newsletter/">Computer Vision Insights Newsletter</a> – A biweekly newsletter to help bring the latest machine learning and computer vision research to applications in people and planetary health.</p><p><a href="http://pixelscientia.com/services/strategy-session/">Computer Vision Strategy Session</a> – Not sure how to advance your computer vision project? Get unstuck with a clear set of next steps. Schedule a 1 hour strategy session now to advance your project.</p>]]>
      </itunes:summary>
      <itunes:keywords>machine learning, breast cancer, mammogram, radiology</itunes:keywords>
      <itunes:explicit>No</itunes:explicit>
      <podcast:transcript url="https://share.transistor.fm/s/56416973/transcript.txt" type="text/plain"/>
    </item>
    <item>
      <title>Radiology Tools for Precision Medicine with Ángel Alberich-Bayarri from Quibim</title>
      <itunes:episode>124</itunes:episode>
      <podcast:episode>124</podcast:episode>
      <itunes:title>Radiology Tools for Precision Medicine with Ángel Alberich-Bayarri from Quibim</itunes:title>
      <itunes:episodeType>full</itunes:episodeType>
      <guid isPermaLink="false">5dbb810f-0931-4757-8684-06eaf84cedbc</guid>
      <link>https://pixelscientia.com/podcast/radiology-tools-for-precision-medicine-with-angel-alberich-bayarri-from-quibim/</link>
      <description>
        <![CDATA[<p>How can we harness medical imaging and artificial intelligence to shift healthcare from reactive to predictive? In this episode, I sit down with Ángel Alberich-Bayarri to discuss how artificial intelligence is revolutionizing radiology and precision medicine. Ángel is the CEO of Quibim, a company recognized globally for its AI-powered tools that turn radiological scans into predictive biomarkers, enabling more precise diagnoses and personalized treatments.</p><p>In our conversation, we hear how his early work in radiology and engineering led to the founding of Quibim and how the company’s AI-based technology transforms medical images into predictive biomarkers. We unpack the challenges of data heterogeneity, how Quibim tackles image harmonization using self-supervised learning, and why accounting for regulations is critical when building healthcare AI products. Ángel also shares his perspective on the value of model explainability, the concept of digital twins, and the future of preventative imaging. 
Join us to discover how AI is disrupting clinical decision-making and preventive healthcare with Ángel Alberich-Bayarri.</p><p><br></p><p><strong>Key Points:</strong></p><ul><li>Hear about Ángel’s background and how his career led to founding Quibim.</li><li>Find out how Quibim turns radiology images into predictive clinical insights.</li><li>Different use cases of Quibim’s technology and why biopsy data is important.</li><li>He explains why Quibim avoids relying solely on radiologist annotations.</li><li>Challenges of using medical imaging: data fragmentation and scanner variability.</li><li>Explore Quibim’s self-supervised image learning harmonization techniques.</li><li>How Quibim increases the explainability of the model while maintaining accuracy.</li><li>Why understanding clinical workflows and radiologist adoption behavior is critical.</li><li>Uncover how regulations influence the development of Quibim’s technology.</li><li>Ángel’s advice for entrepreneurs and leaders of AI-powered startups.</li><li>Quibim’s plans for predictive modeling, digital twins, and AI for preventative medicine.</li></ul><p><br></p><p><strong>Quotes:</strong></p><p>“We would like AI to be able to mine all this hidden information we have right now in the images. 
Our vision is long-term, being able to understand what is happening until this point within the human body.” — Ángel Alberich-Bayarri</p><p><br></p><p>“What [Quibim is] investing in is the next frontier that not only detects and diagnoses disease, but also predicts or prognoses what is going to happen.” — Ángel Alberich-Bayarri</p><p><br></p><p>“Human behavior has a lot of nuances that need to be appreciated when AI is adopted.” — Ángel Alberich-Bayarri</p><p><br></p><p>“The bolder the claims you make, it’s the higher level of evidence you need to achieve.” — Ángel Alberich-Bayarri</p><p><br></p><p>“Taking care of health before we have symptoms, it’s just going to be a growing business, and therefore, a lot of AI tools will be needed to understand our inner us.” — Ángel Alberich-Bayarri</p><p><br></p><p><strong>Links:</strong></p><p><a href="https://www.linkedin.com/in/angelalberich/">Ángel Alberich-Bayarri on LinkedIn</a></p><p><a href="https://x.com/aalberich">Ángel Alberich-Bayarri on X</a></p><p><a href="https://quibim.com">Quibim</a></p><p><br><strong>Resources for Computer Vision Teams:</strong></p><p><a href="https://www.linkedin.com/in/hdcouture/">LinkedIn</a> – Connect with Heather.</p><p><a href="https://pixelscientia.com/newsletter/">Computer Vision Insights Newsletter</a> – A biweekly newsletter to help bring the latest machine learning and computer vision research to applications in people and planetary health.</p><p><a href="http://pixelscientia.com/services/strategy-session/">Computer Vision Strategy Session</a> – Not sure how to advance your computer vision project? Get unstuck with a clear set of next steps. Schedule a 1 hour strategy session now to advance your project.</p>]]>
      </description>
      <content:encoded>
        <![CDATA[<p>How can we harness medical imaging and artificial intelligence to shift healthcare from reactive to predictive? In this episode, I sit down with Ángel Alberich-Bayarri to discuss how artificial intelligence is revolutionizing radiology and precision medicine. Ángel is the CEO of Quibim, a company recognized globally for its AI-powered tools that turn radiological scans into predictive biomarkers, enabling more precise diagnoses and personalized treatments.</p><p>In our conversation, we hear how his early work in radiology and engineering led to the founding of Quibim and how the company’s AI-based technology transforms medical images into predictive biomarkers. We unpack the challenges of data heterogeneity, how Quibim tackles image harmonization using self-supervised learning, and why accounting for regulations is critical when building healthcare AI products. Ángel also shares his perspective on the value of model explainability, the concept of digital twins, and the future of preventative imaging. 
Join us to discover how AI is disrupting clinical decision-making and preventive healthcare with Ángel Alberich-Bayarri.</p><p><br></p><p><strong>Key Points:</strong></p><ul><li>Hear about Ángel’s background and how his career led to founding Quibim.</li><li>Find out how Quibim turns radiology images into predictive clinical insights.</li><li>Different use cases of Quibim’s technology and why biopsy data is important.</li><li>He explains why Quibim avoids relying solely on radiologist annotations.</li><li>Challenges of using medical imaging: data fragmentation and scanner variability.</li><li>Explore Quibim’s self-supervised image learning harmonization techniques.</li><li>How Quibim increases the explainability of the model while maintaining accuracy.</li><li>Why understanding clinical workflows and radiologist adoption behavior is critical.</li><li>Uncover how regulations influence the development of Quibim’s technology.</li><li>Ángel’s advice for entrepreneurs and leaders of AI-powered startups.</li><li>Quibim’s plans for predictive modeling, digital twins, and AI for preventative medicine.</li></ul><p><br></p><p><strong>Quotes:</strong></p><p>“We would like AI to be able to mine all this hidden information we have right now in the images. 
Our vision is long-term, being able to understand what is happening until this point within the human body.” — Ángel Alberich-Bayarri</p><p><br></p><p>“What [Quibim is] investing in is the next frontier that not only detects and diagnoses disease, but also predicts or prognoses what is going to happen.” — Ángel Alberich-Bayarri</p><p><br></p><p>“Human behavior has a lot of nuances that need to be appreciated when AI is adopted.” — Ángel Alberich-Bayarri</p><p><br></p><p>“The bolder the claims you make, it’s the higher level of evidence you need to achieve.” — Ángel Alberich-Bayarri</p><p><br></p><p>“Taking care of health before we have symptoms, it’s just going to be a growing business, and therefore, a lot of AI tools will be needed to understand our inner us.” — Ángel Alberich-Bayarri</p><p><br></p><p><strong>Links:</strong></p><p><a href="https://www.linkedin.com/in/angelalberich/">Ángel Alberich-Bayarri on LinkedIn</a></p><p><a href="https://x.com/aalberich">Ángel Alberich-Bayarri on X</a></p><p><a href="https://quibim.com">Quibim</a></p><p><br><strong>Resources for Computer Vision Teams:</strong></p><p><a href="https://www.linkedin.com/in/hdcouture/">LinkedIn</a> – Connect with Heather.</p><p><a href="https://pixelscientia.com/newsletter/">Computer Vision Insights Newsletter</a> – A biweekly newsletter to help bring the latest machine learning and computer vision research to applications in people and planetary health.</p><p><a href="http://pixelscientia.com/services/strategy-session/">Computer Vision Strategy Session</a> – Not sure how to advance your computer vision project? Get unstuck with a clear set of next steps. Schedule a 1 hour strategy session now to advance your project.</p>]]>
      </content:encoded>
      <pubDate>Mon, 19 May 2025 06:00:00 -0400</pubDate>
      <author>Heather D. Couture</author>
      <enclosure url="https://2.gum.fm/op3.dev/e/pdcn.co/e/pdst.fm/e/dts.podtrac.com/redirect.mp3/media.transistor.fm/a782b1ac/47d68220.mp3" length="47526821" type="audio/mpeg"/>
      <itunes:author>Heather D. Couture</itunes:author>
      <itunes:image href="https://img.transistor.fm/hfFROx9523o-VL0gO89_czIKyUvNuH94lqbGGroStE4/rs:fill:0:0:1/w:1400/h:1400/q:60/mb:500000/aHR0cHM6Ly9pbWct/dXBsb2FkLXByb2R1/Y3Rpb24udHJhbnNp/c3Rvci5mbS8wMjcy/MzM1NmY2ZmMwYzMy/Y2MwOTJhNTBiMjM1/OTE4YS5qcGVn.jpg"/>
      <itunes:duration>1978</itunes:duration>
      <itunes:summary>
        <![CDATA[<p>How can we harness medical imaging and artificial intelligence to shift healthcare from reactive to predictive? In this episode, I sit down with Ángel Alberich-Bayarri to discuss how artificial intelligence is revolutionizing radiology and precision medicine. Ángel is the CEO of Quibim, a company recognized globally for its AI-powered tools that turn radiological scans into predictive biomarkers, enabling more precise diagnoses and personalized treatments.</p><p>In our conversation, we hear how his early work in radiology and engineering led to the founding of Quibim and how the company’s AI-based technology transforms medical images into predictive biomarkers. We unpack the challenges of data heterogeneity, how Quibim tackles image harmonization using self-supervised learning, and why accounting for regulations is critical when building healthcare AI products. Ángel also shares his perspective on the value of model explainability, the concept of digital twins, and the future of preventative imaging. 
Join us to discover how AI is disrupting clinical decision-making and preventive healthcare with Ángel Alberich-Bayarri.</p><p><br></p><p><strong>Key Points:</strong></p><ul><li>Hear about Ángel’s background and how his career led to founding Quibim.</li><li>Find out how Quibim turns radiology images into predictive clinical insights.</li><li>Different use cases of Quibim’s technology and why biopsy data is important.</li><li>He explains why Quibim avoids relying solely on radiologist annotations.</li><li>Challenges of using medical imaging: data fragmentation and scanner variability.</li><li>Explore Quibim’s self-supervised image learning harmonization techniques.</li><li>How Quibim increases the explainability of the model while maintaining accuracy.</li><li>Why understanding clinical workflows and radiologist adoption behavior is critical.</li><li>Uncover how regulations influence the development of Quibim’s technology.</li><li>Ángel’s advice for entrepreneurs and leaders of AI-powered startups.</li><li>Quibim’s plans for predictive modeling, digital twins, and AI for preventative medicine.</li></ul><p><br></p><p><strong>Quotes:</strong></p><p>“We would like AI to be able to mine all this hidden information we have right now in the images. 
Our vision is long-term, being able to understand what is happening until this point within the human body.” — Ángel Alberich-Bayarri</p><p><br></p><p>“What [Quibim is] investing in is the next frontier that not only detects and diagnoses disease, but also predicts or prognoses what is going to happen.” — Ángel Alberich-Bayarri</p><p><br></p><p>“Human behavior has a lot of nuances that need to be appreciated when AI is adopted.” — Ángel Alberich-Bayarri</p><p><br></p><p>“The bolder the claims you make, it’s the higher level of evidence you need to achieve.” — Ángel Alberich-Bayarri</p><p><br></p><p>“Taking care of health before we have symptoms, it’s just going to be a growing business, and therefore, a lot of AI tools will be needed to understand our inner us.” — Ángel Alberich-Bayarri</p><p><br></p><p><strong>Links:</strong></p><p><a href="https://www.linkedin.com/in/angelalberich/">Ángel Alberich-Bayarri on LinkedIn</a></p><p><a href="https://x.com/aalberich">Ángel Alberich-Bayarri on X</a></p><p><a href="https://quibim.com">Quibim</a></p><p><br><strong>Resources for Computer Vision Teams:</strong></p><p><a href="https://www.linkedin.com/in/hdcouture/">LinkedIn</a> – Connect with Heather.</p><p><a href="https://pixelscientia.com/newsletter/">Computer Vision Insights Newsletter</a> – A biweekly newsletter to help bring the latest machine learning and computer vision research to applications in people and planetary health.</p><p><a href="http://pixelscientia.com/services/strategy-session/">Computer Vision Strategy Session</a> – Not sure how to advance your computer vision project? Get unstuck with a clear set of next steps. Schedule a 1 hour strategy session now to advance your project.</p>]]>
      </itunes:summary>
      <itunes:keywords>machine learning, radiology, precision medicine</itunes:keywords>
      <itunes:explicit>No</itunes:explicit>
      <podcast:transcript url="https://share.transistor.fm/s/a782b1ac/transcript.txt" type="text/plain"/>
    </item>
    <item>
      <title>Simulating Clinical Trials with Orr Inbar from QuantHealth</title>
      <itunes:episode>123</itunes:episode>
      <podcast:episode>123</podcast:episode>
      <itunes:title>Simulating Clinical Trials with Orr Inbar from QuantHealth</itunes:title>
      <itunes:episodeType>full</itunes:episodeType>
      <guid isPermaLink="false">dbf88ee8-1520-4289-b6d5-aa5f6fc2fa59</guid>
      <link>https://pixelscientia.com/podcast/simulating-clinical-trials-with-orr-inbarr-from-quant-health/</link>
      <description>
        <![CDATA[<p>Drug development is notoriously time-consuming and expensive, but what if we could simulate clinical trials before they even begin? Orr Inbar, Co-Founder and CEO of QuantHealth, joins me to explore how his team is doing just that. By simulating trials with AI-native models, QuantHealth helps pharmaceutical companies make better decisions about how to design trials and test drugs.</p><p>Orr shares how QuantHealth uses real-world patient data and detailed drug biology to build deep-learning models capable of forecasting patient responses to new therapies. He breaks down their biggest challenges, like the complexities of messy healthcare data, hidden biases, and the importance of domain knowledge when building AI tools for regulated environments. He also shares a key lesson for any AI startup: focus on solving real problems, not just building clever models. Tune in for a fascinating look at how AI is reshaping drug development and what the future of clinical trials could look like!</p><p><br></p><p><strong>Key Points:</strong></p><ul><li>Some background on Orr, his parents, and how he founded QuantHealth.</li><li>Key problems QuantHealth is solving as a clinical trial simulation company.</li><li>A breakdown of the biggest challenges facing clinical trials.</li><li>Why we need to improve data-driven trials of drugs.</li><li>How QuantHealth builds their foundation models for trial simulations.</li><li>Examples of the type of predictions their models make in clinical contexts.</li><li>How they use patient and drug data to make predictions and build “digital drugs”.</li><li>Key challenges of working with these different types of data.</li><li>Methods for combating bias, including the use of exogenous data. 
</li><li>How they incorporate the medical context in model development.</li><li>QuantHealth’s validation process: how they meet rigorous industry standards.</li><li>Orr’s advice to other AI startups on creating value, not just smart models.</li><li>Where you can expect to see QuantHealth in the next three to five years.</li></ul><p><br><strong>Quotes:</strong></p><p>“There is a constant desire in drug development and pharmaceutical research to get your hands on more data. This makes sense since it's a very data-driven industry. But at the same time, there was a mismatch there, because there's actually quite a lot of data already out there.” — Orr Inbar</p><p><br></p><p>“How do we bridge the gap between the data that we already have and the insights that we need to generate to answer those questions?” — Orr Inbar</p><p><br></p><p>“If you take a step back and look at how drugs are being developed today and with an emphasis on clinical trials, we're essentially doing the same things that we were doing 50 years ago.” — Orr Inbar</p><p><br></p><p>“Even in a world of GenAI, you can't just snap your fingers and get the solution. 
It requires a lot of work to structure and harmonize the data.” — Orr Inbar</p><p><br></p><p>“Every trial that we simulate, we first go through a data enrichment process where we look for the latest information in terms of research publications, recently completed trials that are relevant to our drug of interest, and incorporate that data into our data sets.” — Orr Inbar</p><p><br></p><p><strong>Links:</strong></p><p><a href="https://www.linkedin.com/in/orr-inbar-93346158/?originalSubdomain=il">Orr Inbar on LinkedIn<br></a><a href="https://quanthealth.ai/">QuantHealth<br></a><br><strong>Resources for Computer Vision Teams:</strong></p><p><a href="https://www.linkedin.com/in/hdcouture/">LinkedIn</a> – Connect with Heather.</p><p><a href="https://pixelscientia.com/newsletter/">Computer Vision Insights Newsletter</a> – A biweekly newsletter to help bring the latest machine learning and computer vision research to applications in people and planetary health.</p><p><a href="http://pixelscientia.com/services/strategy-session/">Computer Vision Strategy Session</a> – Not sure how to advance your computer vision project? Get unstuck with a clear set of next steps. Schedule a 1 hour strategy session now to advance your project.</p>]]>
      </description>
      <content:encoded>
        <![CDATA[<p>Drug development is notoriously time-consuming and expensive, but what if we could simulate clinical trials before they even begin? Orr Inbar, Co-Founder and CEO of QuantHealth, joins me to explore how his team is doing just that. By simulating trials with AI-native models, QuantHealth helps pharmaceutical companies make better decisions about how to design trials and test drugs.</p><p>Orr shares how QuantHealth uses real-world patient data and detailed drug biology to build deep-learning models capable of forecasting patient responses to new therapies. He breaks down their biggest challenges, like the complexities of messy healthcare data, hidden biases, and the importance of domain knowledge when building AI tools for regulated environments. He also shares a key lesson for any AI startup: focus on solving real problems, not just building clever models. Tune in for a fascinating look at how AI is reshaping drug development and what the future of clinical trials could look like!</p><p><br></p><p><strong>Key Points:</strong></p><ul><li>Some background on Orr, his parents, and how he founded QuantHealth.</li><li>Key problems QuantHealth is solving as a clinical trial simulation company.</li><li>A breakdown of the biggest challenges facing clinical trials.</li><li>Why we need to improve data-driven trials of drugs.</li><li>How QuantHealth builds their foundation models for trial simulations.</li><li>Examples of the type of predictions their models make in clinical contexts.</li><li>How they use patient and drug data to make predictions and build “digital drugs”.</li><li>Key challenges of working with these different types of data.</li><li>Methods for combating bias, including the use of exogenous data. 
</li><li>How they incorporate the medical context in model development.</li><li>QuantHealth’s validation process: how they meet rigorous industry standards.</li><li>Orr’s advice to other AI startups on creating value, not just smart models.</li><li>Where you can expect to see QuantHealth in the next three to five years.</li></ul><p><br><strong>Quotes:</strong></p><p>“There is a constant desire in drug development and pharmaceutical research to get your hands on more data. This makes sense since it's a very data-driven industry. But at the same time, there was a mismatch there, because there's actually quite a lot of data already out there.” — Orr Inbar</p><p><br></p><p>“How do we bridge the gap between the data that we already have and the insights that we need to generate to answer those questions?” — Orr Inbar</p><p><br></p><p>“If you take a step back and look at how drugs are being developed today and with an emphasis on clinical trials, we're essentially doing the same things that we were doing 50 years ago.” — Orr Inbar</p><p><br></p><p>“Even in a world of GenAI, you can't just snap your fingers and get the solution. 
It requires a lot of work to structure and harmonize the data.” — Orr Inbar</p><p><br></p><p>“Every trial that we simulate, we first go through a data enrichment process where we look for the latest information in terms of research publications, recently completed trials that are relevant to our drug of interest, and incorporate that data into our data sets.” — Orr Inbar</p><p><br></p><p><strong>Links:</strong></p><p><a href="https://www.linkedin.com/in/orr-inbar-93346158/?originalSubdomain=il">Orr Inbar on LinkedIn<br></a><a href="https://quanthealth.ai/">QuantHealth<br></a><br><strong>Resources for Computer Vision Teams:</strong></p><p><a href="https://www.linkedin.com/in/hdcouture/">LinkedIn</a> – Connect with Heather.</p><p><a href="https://pixelscientia.com/newsletter/">Computer Vision Insights Newsletter</a> – A biweekly newsletter to help bring the latest machine learning and computer vision research to applications in people and planetary health.</p><p><a href="http://pixelscientia.com/services/strategy-session/">Computer Vision Strategy Session</a> – Not sure how to advance your computer vision project? Get unstuck with a clear set of next steps. Schedule a 1 hour strategy session now to advance your project.</p>]]>
      </content:encoded>
      <pubDate>Mon, 05 May 2025 06:00:00 -0400</pubDate>
      <author>Heather D. Couture</author>
      <enclosure url="https://2.gum.fm/op3.dev/e/pdcn.co/e/pdst.fm/e/dts.podtrac.com/redirect.mp3/media.transistor.fm/5292fc5f/01d3e432.mp3" length="30989761" type="audio/mpeg"/>
      <itunes:author>Heather D. Couture</itunes:author>
      <itunes:image href="https://img.transistor.fm/rn-bqqgvD4_oMK82dLcD7jJnjw7w801CTmalM3W0eL0/rs:fill:0:0:1/w:1400/h:1400/q:60/mb:500000/aHR0cHM6Ly9pbWct/dXBsb2FkLXByb2R1/Y3Rpb24udHJhbnNp/c3Rvci5mbS82YTRm/Zjg4MmVhNGEyMTUz/NmU3ZjdlNGFhMDE4/MTcxMy5qcGVn.jpg"/>
      <itunes:duration>1289</itunes:duration>
      <itunes:summary>
        <![CDATA[<p>Drug development is notoriously time-consuming and expensive, but what if we could simulate clinical trials before they even begin? Orr Inbar, Co-Founder and CEO of QuantHealth, joins me to explore how his team is doing just that. By simulating trials with AI-native models, QuantHealth helps pharmaceutical companies make better decisions about how to design trials and test drugs.</p><p>Orr shares how QuantHealth uses real-world patient data and detailed drug biology to build deep-learning models capable of forecasting patient responses to new therapies. He breaks down their biggest challenges, like the complexities of messy healthcare data, hidden biases, and the importance of domain knowledge when building AI tools for regulated environments. He also shares a key lesson for any AI startup: focus on solving real problems, not just building clever models. Tune in for a fascinating look at how AI is reshaping drug development and what the future of clinical trials could look like!</p><p><br></p><p><strong>Key Points:</strong></p><ul><li>Some background on Orr, his parents, and how he founded QuantHealth.</li><li>Key problems QuantHealth is solving as a clinical trial simulation company.</li><li>A breakdown of the biggest challenges facing clinical trials.</li><li>Why we need to improve data-driven trials of drugs.</li><li>How QuantHealth builds their foundation models for trial simulations.</li><li>Examples of the type of predictions their models make in clinical contexts.</li><li>How they use patient and drug data to make predictions and build “digital drugs”.</li><li>Key challenges of working with these different types of data.</li><li>Methods for combating bias, including the use of exogenous data. 
</li><li>How they incorporate the medical context in model development.</li><li>QuantHealth’s validation process: how they meet rigorous industry standards.</li><li>Orr’s advice to other AI startups on creating value, not just smart models.</li><li>Where you can expect to see QuantHealth in the next three to five years.</li></ul><p><br><strong>Quotes:</strong></p><p>“There is a constant desire in drug development and pharmaceutical research to get your hands on more data. This makes sense since it's a very data-driven industry. But at the same time, there was a mismatch there, because there's actually quite a lot of data already out there.” — Orr Inbar</p><p><br></p><p>“How do we bridge the gap between the data that we already have and the insights that we need to generate to answer those questions?” — Orr Inbar</p><p><br></p><p>“If you take a step back and look at how drugs are being developed today and with an emphasis on clinical trials, we're essentially doing the same things that we were doing 50 years ago.” — Orr Inbar</p><p><br></p><p>“Even in a world of GenAI, you can't just snap your fingers and get the solution. 
It requires a lot of work to structure and harmonize the data.” — Orr Inbar</p><p><br></p><p>“Every trial that we simulate, we first go through a data enrichment process where we look for the latest information in terms of research publications, recently completed trials that are relevant to our drug of interest, and incorporate that data into our data sets.” — Orr Inbar</p><p><br></p><p><strong>Links:</strong></p><p><a href="https://www.linkedin.com/in/orr-inbar-93346158/?originalSubdomain=il">Orr Inbar on LinkedIn<br></a><a href="https://quanthealth.ai/">QuantHealth<br></a><br><strong>Resources for Computer Vision Teams:</strong></p><p><a href="https://www.linkedin.com/in/hdcouture/">LinkedIn</a> – Connect with Heather.</p><p><a href="https://pixelscientia.com/newsletter/">Computer Vision Insights Newsletter</a> – A biweekly newsletter to help bring the latest machine learning and computer vision research to applications in people and planetary health.</p><p><a href="http://pixelscientia.com/services/strategy-session/">Computer Vision Strategy Session</a> – Not sure how to advance your computer vision project? Get unstuck with a clear set of next steps. Schedule a 1 hour strategy session now to advance your project.</p>]]>
      </itunes:summary>
      <itunes:keywords>machine learning, clinical trials, drug development</itunes:keywords>
      <itunes:explicit>No</itunes:explicit>
      <podcast:transcript url="https://share.transistor.fm/s/5292fc5f/transcript.txt" type="text/plain"/>
    </item>
    <item>
      <title>Early Wildfire Detection with Shahab Bahrami from SenseNet</title>
      <itunes:episode>122</itunes:episode>
      <podcast:episode>122</podcast:episode>
      <itunes:title>Early Wildfire Detection with Shahab Bahrami from SenseNet</itunes:title>
      <itunes:episodeType>full</itunes:episodeType>
      <guid isPermaLink="false">7d4b2664-ceac-4740-8961-32ee43b90b06</guid>
      <link>https://pixelscientia.com/podcast/early-wildfire-detection-with-shahab-bahrami-from-sensenet/</link>
      <description>
        <![CDATA[<p>The recent destruction of the Pacific Palisades in Los Angeles was a brutal reminder of why we need robust early wildfire detection systems. Joining me today is Shahab Bahrami, the co-founder and CTO at SenseNet – a company that provides advanced AI-powered cameras and sensors to protect communities and valuable assets against wildfires.</p><p>Shahab is passionate about using interdisciplinary research to bridge the gap between machine learning and optimization, and he begins today’s conversation by detailing his professional background and how it led him to co-found SenseNet. Then, we unpack SenseNet and how its technology works, how it gathers data for its AI models, the challenges of relying on images and other sensor data to train machine learning models, and how SenseNet uses multiple sources to detect or define any one problem. To end, we learn why and how SenseNet uses various AI models in a single sensor, how it measures the overall impact of its tech, where the company plans to be in the next five years, and Shahab’s valuable advice for other leaders of AI-powered startups.     </p><p><br></p><p><strong>Key Points:</strong></p><ul><li>Shahab Bahrami walks us through his professional background and how it led to SenseNet. </li><li>The ins and outs of SenseNet and how its technology works. </li><li>How machine learning fits into SenseNet’s offerings, and how it gathers the necessary data. </li><li>The challenges of working with images and other sensor data to train models.  </li><li>How SenseNet integrates information from different sources to zero in on a single anomaly. </li><li>Understanding how it uses multiple AI models to adapt to variations post-installation. </li><li>How the system chooses which AI model to apply and when. </li><li>Shahab describes how his company measures the overall impact of its technology. </li><li>His advice to other leaders of AI-powered startups, and his five-year vision for SenseNet. 
</li></ul><p><br></p><p><strong>Quotes:</strong></p><p>“We have one of the most comprehensive wildfire detection solutions in the world, and it is proven by multiple, real-world projects.” — Shahab Bahrami</p><p><br></p><p>“Having separate AI models is the solution that we are now implementing.” — Shahab Bahrami</p><p><br></p><p>“For the sensor’s AI – because it is a semi-supervised AI, it automatically adapts itself to local conditions. It learns gradually what is normal and what is abnormal, and it is a continuous learning. It won’t stop.” — Shahab Bahrami</p><p><br></p><p>“AI changes fast. Every day we have a new AI engine, we have a new model, and leaders, I believe, need to stay updated and make sure their teams have the support and also the resources to keep innovating.” — Shahab Bahrami</p><p><br></p><p><strong>Links:</strong></p><p><a href="http://www.shahabbahrami.com/">Shahab Bahrami </a></p><p><a href="https://www.linkedin.com/in/shahab-bahrami-5437a845">Shahab Bahrami on LinkedIn</a></p><p><a href="https://www.sensenet.ca/">SenseNet</a></p><p><br><strong>Resources for Computer Vision Teams:</strong></p><p><a href="https://www.linkedin.com/in/hdcouture/">LinkedIn</a> – Connect with Heather.</p><p><a href="https://pixelscientia.com/newsletter/">Computer Vision Insights Newsletter</a> – A biweekly newsletter to help bring the latest machine learning and computer vision research to applications in people and planetary health.</p><p><a href="http://pixelscientia.com/services/strategy-session/">Computer Vision Strategy Session</a> – Not sure how to advance your computer vision project? Get unstuck with a clear set of next steps. Schedule a 1 hour strategy session now to advance your project.</p>]]>
      </description>
      <content:encoded>
        <![CDATA[<p>The recent destruction of the Pacific Palisades in Los Angeles was a brutal reminder of why we need robust early wildfire detection systems. Joining me today is Shahab Bahrami, the co-founder and CTO at SenseNet – a company that provides advanced AI-powered cameras and sensors to protect communities and valuable assets against wildfires.</p><p>Shahab is passionate about using interdisciplinary research to bridge the gap between machine learning and optimization, and he begins today’s conversation by detailing his professional background and how it led him to co-found SenseNet. Then, we unpack SenseNet and how its technology works, how it gathers data for its AI models, the challenges of relying on images and other sensor data to train machine learning models, and how SenseNet uses multiple sources to detect or define any one problem. To end, we learn why and how SenseNet uses various AI models in a single sensor, how it measures the overall impact of its tech, where the company plans to be in the next five years, and Shahab’s valuable advice for other leaders of AI-powered startups.     </p><p><br></p><p><strong>Key Points:</strong></p><ul><li>Shahab Bahrami walks us through his professional background and how it led to SenseNet. </li><li>The ins and outs of SenseNet and how its technology works. </li><li>How machine learning fits into SenseNet’s offerings, and how it gathers the necessary data. </li><li>The challenges of working with images and other sensor data to train models.  </li><li>How SenseNet integrates information from different sources to zero in on a single anomaly. </li><li>Understanding how it uses multiple AI models to adapt to variations post-installation. </li><li>How the system chooses which AI model to apply and when. </li><li>Shahab describes how his company measures the overall impact of its technology. </li><li>His advice to other leaders of AI-powered startups, and his five-year vision for SenseNet. 
</li></ul><p><br></p><p><strong>Quotes:</strong></p><p>“We have one of the most comprehensive wildfire detection solutions in the world, and it is proven by multiple, real-world projects.” — Shahab Bahrami</p><p><br></p><p>“Having separate AI models is the solution that we are now implementing.” — Shahab Bahrami</p><p><br></p><p>“For the sensor’s AI – because it is a semi-supervised AI, it automatically adapts itself to local conditions. It learns gradually what is normal and what is abnormal, and it is a continuous learning. It won’t stop.” — Shahab Bahrami</p><p><br></p><p>“AI changes fast. Every day we have a new AI engine, we have a new model, and leaders, I believe, need to stay updated and make sure their teams have the support and also the resources to keep innovating.” — Shahab Bahrami</p><p><br></p><p><strong>Links:</strong></p><p><a href="http://www.shahabbahrami.com/">Shahab Bahrami </a></p><p><a href="https://www.linkedin.com/in/shahab-bahrami-5437a845">Shahab Bahrami on LinkedIn</a></p><p><a href="https://www.sensenet.ca/">SenseNet</a></p><p><br><strong>Resources for Computer Vision Teams:</strong></p><p><a href="https://www.linkedin.com/in/hdcouture/">LinkedIn</a> – Connect with Heather.</p><p><a href="https://pixelscientia.com/newsletter/">Computer Vision Insights Newsletter</a> – A biweekly newsletter to help bring the latest machine learning and computer vision research to applications in people and planetary health.</p><p><a href="http://pixelscientia.com/services/strategy-session/">Computer Vision Strategy Session</a> – Not sure how to advance your computer vision project? Get unstuck with a clear set of next steps. Schedule a 1 hour strategy session now to advance your project.</p>]]>
      </content:encoded>
      <pubDate>Mon, 21 Apr 2025 06:00:00 -0400</pubDate>
      <author>Heather D. Couture</author>
      <enclosure url="https://2.gum.fm/op3.dev/e/pdcn.co/e/pdst.fm/e/dts.podtrac.com/redirect.mp3/media.transistor.fm/d83ce8ff/1a1e7000.mp3" length="33480613" type="audio/mpeg"/>
      <itunes:author>Heather D. Couture</itunes:author>
      <itunes:image href="https://img.transistor.fm/N0LY0QuhXvvzMCHY7tbon078zhwo5MwgcGs-2q3zW-E/rs:fill:0:0:1/w:1400/h:1400/q:60/mb:500000/aHR0cHM6Ly9pbWct/dXBsb2FkLXByb2R1/Y3Rpb24udHJhbnNp/c3Rvci5mbS84YjU0/MGM3MDU0ODgyODE0/YWI5YjY1MWRhYzZi/YjJjYy5qcGVn.jpg"/>
      <itunes:duration>1393</itunes:duration>
      <itunes:summary>
        <![CDATA[<p>The recent destruction of the Pacific Palisades in Los Angeles was a brutal reminder of why we need robust early wildfire detection systems. Joining me today is Shahab Bahrami, the co-founder and CTO at SenseNet – a company that provides advanced AI-powered cameras and sensors to protect communities and valuable assets against wildfires.</p><p>Shahab is passionate about using interdisciplinary research to bridge the gap between machine learning and optimization, and he begins today’s conversation by detailing his professional background and how it led him to co-found SenseNet. Then, we unpack SenseNet and how its technology works, how it gathers data for its AI models, the challenges of relying on images and other sensor data to train machine learning models, and how SenseNet uses multiple sources to detect or define any one problem. To end, we learn why and how SenseNet uses various AI models in a single sensor, how it measures the overall impact of its tech, where the company plans to be in the next five years, and Shahab’s valuable advice for other leaders of AI-powered startups.     </p><p><br></p><p><strong>Key Points:</strong></p><ul><li>Shahab Bahrami walks us through his professional background and how it led to SenseNet. </li><li>The ins and outs of SenseNet and how its technology works. </li><li>How machine learning fits into SenseNet’s offerings, and how it gathers the necessary data. </li><li>The challenges of working with images and other sensor data to train models.  </li><li>How SenseNet integrates information from different sources to zero in on a single anomaly. </li><li>Understanding how it uses multiple AI models to adapt to variations post-installation. </li><li>How the system chooses which AI model to apply and when. </li><li>Shahab describes how his company measures the overall impact of its technology. </li><li>His advice to other leaders of AI-powered startups, and his five-year vision for SenseNet. 
</li></ul><p><br></p><p><strong>Quotes:</strong></p><p>“We have one of the most comprehensive wildfire detection solutions in the world, and it is proven by multiple, real-world projects.” — Shahab Bahrami</p><p><br></p><p>“Having separate AI models is the solution that we are now implementing.” — Shahab Bahrami</p><p><br></p><p>“For the sensor’s AI – because it is a semi-supervised AI, it automatically adapts itself to local conditions. It learns gradually what is normal and what is abnormal, and it is a continuous learning. It won’t stop.” — Shahab Bahrami</p><p><br></p><p>“AI changes fast. Every day we have a new AI engine, we have a new model, and leaders, I believe, need to stay updated and make sure their teams have the support and also the resources to keep innovating.” — Shahab Bahrami</p><p><br></p><p><strong>Links:</strong></p><p><a href="http://www.shahabbahrami.com/">Shahab Bahrami </a></p><p><a href="https://www.linkedin.com/in/shahab-bahrami-5437a845">Shahab Bahrami on LinkedIn</a></p><p><a href="https://www.sensenet.ca/">SenseNet</a></p><p><br><strong>Resources for Computer Vision Teams:</strong></p><p><a href="https://www.linkedin.com/in/hdcouture/">LinkedIn</a> – Connect with Heather.</p><p><a href="https://pixelscientia.com/newsletter/">Computer Vision Insights Newsletter</a> – A biweekly newsletter to help bring the latest machine learning and computer vision research to applications in people and planetary health.</p><p><a href="http://pixelscientia.com/services/strategy-session/">Computer Vision Strategy Session</a> – Not sure how to advance your computer vision project? Get unstuck with a clear set of next steps. Schedule a 1 hour strategy session now to advance your project.</p>]]>
      </itunes:summary>
      <itunes:keywords>machine learning, wildfires, wildfire detection</itunes:keywords>
      <itunes:explicit>No</itunes:explicit>
      <podcast:transcript url="https://share.transistor.fm/s/d83ce8ff/transcript.txt" type="text/plain"/>
    </item>
    <item>
      <title>Foundation Model Series: Empowering Drug Discovery with Rick Schneider from Helical</title>
      <itunes:episode>121</itunes:episode>
      <podcast:episode>121</podcast:episode>
      <itunes:title>Foundation Model Series: Empowering Drug Discovery with Rick Schneider from Helical</itunes:title>
      <itunes:episodeType>full</itunes:episodeType>
      <guid isPermaLink="false">70238dba-f61e-4455-b29c-b022fe287415</guid>
      <link>https://pixelscientia.com/podcast/empowering-drug-discovery-with-rick-schneider-from-helical/</link>
      <description>
        <![CDATA[<p>AI is transforming drug discovery by making biological data more accessible and actionable, bridging the gap between complex sequencing data and real-world therapeutic breakthroughs. As Rick Schneider puts it, it's all about leveraging powerful models to “build use cases that matter and bring value.”</p><p>In this episode of Impact AI, we hear from the CEO and Co-founder of Helical to find out how bio-foundation models are transforming pharmaceutical research. Rick shares how Helical’s AI platform enables drug discovery by leveraging biological sequencing data without requiring companies to build their own models from scratch. He also reveals the challenges of working with high-dimensional biological data, the power of model specialization for specific therapeutic areas, and the growing role of open-source AI in healthcare innovation.</p><p>Whether you're in biotech, AI, or simply curious about the future of medicine, this episode offers invaluable insights into how AI is shaping the next generation of drug discovery. 
Tune in today!</p><p><br></p><p><strong>Key Points:</strong></p><ul><li>Introducing Rick, his engineering background, and Helical’s mission.</li><li>The challenges of leveraging biological foundation models for drug discovery.</li><li>Understanding biological sequencing data and its complexities.</li><li>Key technical challenges: messy datasets, long-range dependencies, and model architecture.</li><li>How Helix, Helical’s mRNA foundation model, competes with industry leaders.</li><li>Three key factors in building biological foundation models: data, compute, and talent</li><li>The shift from narrow AI to general-purpose AI in pharma.</li><li>Benchmarking and evaluating foundation models for different use cases.</li><li>Commercializing Helical’s platform through partnerships with pharma companies.</li><li>Insight into the role of open-source AI in advancing biological research.</li><li>The future of biological foundation models: scaling up for greater impact.</li><li>Rick’s vision for Helical as the backbone of in silico pharma labs.</li></ul><p><br></p><p><strong>Quotes:</strong></p><p>“The question is, how do I leverage [powerful biological foundation models] and – build use cases that matter and bring value? Helical is building a therapeutic area, an agnostic AI platform that is empowering single-cell RNA and DNA bio foundation models for drug discovery.” — Rick Schneider</p><p><br></p><p>“In bio, you can still innovate on the architecture side and not simply [with] the scale of the models. 
It's not simply by throwing more compute at the models that you get to the very best outcomes.” — Rick Schneider</p><p><br></p><p>“Be okay with being different in your approach and accept [that you will] be contrarian to certain things.” — Rick Schneider</p><p><br></p><p><strong>Links:</strong></p><p><a href="https://www.linkedin.com/in/rick-schneider-464688140/">Rick Schneider on LinkedIn</a></p><p><a href="https://www.helical-ai.com/">Helical</a></p><p><a href="https://github.com/helicalAI/helical">Helical on GitHub</a></p><p><a href="https://huggingface.co/helical-ai">Helical on Hugging Face</a></p><p><a href="https://www.helical-ai.com/blog/helix-mrna-v0">Introducing Helix-mRNA-v0</a></p><p><a href="https://huggingface.co/helical-ai/helix-mRNA">Helix-mRNA</a></p><p><a href="https://doi.org/10.48550/arXiv.2502.13785">Helix-mRNA: A Hybrid Foundation Model For Full Sequence mRNA Therapeutics</a></p><p><br><strong>Resources for Computer Vision Teams:</strong></p><p><a href="https://www.linkedin.com/in/hdcouture/">LinkedIn</a> – Connect with Heather.</p><p><a href="https://pixelscientia.com/newsletter/">Computer Vision Insights Newsletter</a> – A biweekly newsletter to help bring the latest machine learning and computer vision research to applications in people and planetary health.</p><p><a href="http://pixelscientia.com/services/strategy-session/">Computer Vision Strategy Session</a> – Not sure how to advance your computer vision project? Get unstuck with a clear set of next steps. Schedule a 1 hour strategy session now to advance your project.</p>]]>
      </description>
      <content:encoded>
        <![CDATA[<p>AI is transforming drug discovery by making biological data more accessible and actionable, bridging the gap between complex sequencing data and real-world therapeutic breakthroughs. As Rick Schneider puts it, it's all about leveraging powerful models to “build use cases that matter and bring value.”</p><p>In this episode of Impact AI, we hear from the CEO and Co-founder of Helical to find out how bio-foundation models are transforming pharmaceutical research. Rick shares how Helical’s AI platform enables drug discovery by leveraging biological sequencing data without requiring companies to build their own models from scratch. He also reveals the challenges of working with high-dimensional biological data, the power of model specialization for specific therapeutic areas, and the growing role of open-source AI in healthcare innovation.</p><p>Whether you're in biotech, AI, or simply curious about the future of medicine, this episode offers invaluable insights into how AI is shaping the next generation of drug discovery. 
Tune in today!</p><p><br></p><p><strong>Key Points:</strong></p><ul><li>Introducing Rick, his engineering background, and Helical’s mission.</li><li>The challenges of leveraging biological foundation models for drug discovery.</li><li>Understanding biological sequencing data and its complexities.</li><li>Key technical challenges: messy datasets, long-range dependencies, and model architecture.</li><li>How Helix, Helical’s mRNA foundation model, competes with industry leaders.</li><li>Three key factors in building biological foundation models: data, compute, and talent.</li><li>The shift from narrow AI to general-purpose AI in pharma.</li><li>Benchmarking and evaluating foundation models for different use cases.</li><li>Commercializing Helical’s platform through partnerships with pharma companies.</li><li>Insight into the role of open-source AI in advancing biological research.</li><li>The future of biological foundation models: scaling up for greater impact.</li><li>Rick’s vision for Helical as the backbone of in silico pharma labs.</li></ul><p><br></p><p><strong>Quotes:</strong></p><p>“The question is, how do I leverage [powerful biological foundation models] and – build use cases that matter and bring value? Helical is building a therapeutic area-agnostic AI platform that is empowering single-cell RNA and DNA bio foundation models for drug discovery.” — Rick Schneider</p><p><br></p><p>“In bio, you can still innovate on the architecture side and not simply [with] the scale of the models. 
It's not simply by throwing more compute at the models that you get to the very best outcomes.” — Rick Schneider</p><p><br></p><p>“Be okay with being different in your approach and accept [that you will] be contrarian to certain things.” — Rick Schneider</p><p><br></p><p><strong>Links:</strong></p><p><a href="https://www.linkedin.com/in/rick-schneider-464688140/">Rick Schneider on LinkedIn</a></p><p><a href="https://www.helical-ai.com/">Helical</a></p><p><a href="https://github.com/helicalAI/helical">Helical on GitHub</a></p><p><a href="https://huggingface.co/helical-ai">Helical on Hugging Face</a></p><p><a href="https://www.helical-ai.com/blog/helix-mrna-v0">Introducing Helix-mRNA-v0</a></p><p><a href="https://huggingface.co/helical-ai/helix-mRNA">Helix-mRNA</a></p><p><a href="https://doi.org/10.48550/arXiv.2502.13785">Helix-mRNA: A Hybrid Foundation Model For Full Sequence mRNA Therapeutics</a></p><p><br><strong>Resources for Computer Vision Teams:</strong></p><p><a href="https://www.linkedin.com/in/hdcouture/">LinkedIn</a> – Connect with Heather.</p><p><a href="https://pixelscientia.com/newsletter/">Computer Vision Insights Newsletter</a> – A biweekly newsletter to help bring the latest machine learning and computer vision research to applications in people and planetary health.</p><p><a href="http://pixelscientia.com/services/strategy-session/">Computer Vision Strategy Session</a> – Not sure how to advance your computer vision project? Get unstuck with a clear set of next steps. Schedule a 1 hour strategy session now to advance your project.</p>]]>
      </content:encoded>
      <pubDate>Mon, 07 Apr 2025 06:00:00 -0400</pubDate>
      <author>Heather D. Couture</author>
      <enclosure url="https://2.gum.fm/op3.dev/e/pdcn.co/e/pdst.fm/e/dts.podtrac.com/redirect.mp3/media.transistor.fm/5f4c6e90/c2a6f1d6.mp3" length="21388367" type="audio/mpeg"/>
      <itunes:author>Heather D. Couture</itunes:author>
      <itunes:image href="https://img.transistor.fm/ZLHvIFF2VEB1lqwobQCVoD35QoQ5_XFneh8BRXh28Qs/rs:fill:0:0:1/w:1400/h:1400/q:60/mb:500000/aHR0cHM6Ly9pbWct/dXBsb2FkLXByb2R1/Y3Rpb24udHJhbnNp/c3Rvci5mbS85MmM3/ZjIzNTNlMDI0YWVk/NDlkY2Q3YWFhNWYw/ZTYxZC5qcGVn.jpg"/>
      <itunes:duration>1330</itunes:duration>
      <itunes:summary>
        <![CDATA[<p>AI is transforming drug discovery by making biological data more accessible and actionable, bridging the gap between complex sequencing data and real-world therapeutic breakthroughs. As Rick Schneider puts it, it's all about leveraging powerful models to “build use cases that matter and bring value.”</p><p>In this episode of Impact AI, we hear from the CEO and Co-founder of Helical to find out how bio-foundation models are transforming pharmaceutical research. Rick shares how Helical’s AI platform enables drug discovery by leveraging biological sequencing data without requiring companies to build their own models from scratch. He also reveals the challenges of working with high-dimensional biological data, the power of model specialization for specific therapeutic areas, and the growing role of open-source AI in healthcare innovation.</p><p>Whether you're in biotech, AI, or simply curious about the future of medicine, this episode offers invaluable insights into how AI is shaping the next generation of drug discovery. 
Tune in today!</p><p><br></p><p><strong>Key Points:</strong></p><ul><li>Introducing Rick, his engineering background, and Helical’s mission.</li><li>The challenges of leveraging biological foundation models for drug discovery.</li><li>Understanding biological sequencing data and its complexities.</li><li>Key technical challenges: messy datasets, long-range dependencies, and model architecture.</li><li>How Helix, Helical’s mRNA foundation model, competes with industry leaders.</li><li>Three key factors in building biological foundation models: data, compute, and talent.</li><li>The shift from narrow AI to general-purpose AI in pharma.</li><li>Benchmarking and evaluating foundation models for different use cases.</li><li>Commercializing Helical’s platform through partnerships with pharma companies.</li><li>Insight into the role of open-source AI in advancing biological research.</li><li>The future of biological foundation models: scaling up for greater impact.</li><li>Rick’s vision for Helical as the backbone of in silico pharma labs.</li></ul><p><br></p><p><strong>Quotes:</strong></p><p>“The question is, how do I leverage [powerful biological foundation models] and – build use cases that matter and bring value? Helical is building a therapeutic area-agnostic AI platform that is empowering single-cell RNA and DNA bio foundation models for drug discovery.” — Rick Schneider</p><p><br></p><p>“In bio, you can still innovate on the architecture side and not simply [with] the scale of the models. 
It's not simply by throwing more compute at the models that you get to the very best outcomes.” — Rick Schneider</p><p><br></p><p>“Be okay with being different in your approach and accept [that you will] be contrarian to certain things.” — Rick Schneider</p><p><br></p><p><strong>Links:</strong></p><p><a href="https://www.linkedin.com/in/rick-schneider-464688140/">Rick Schneider on LinkedIn</a></p><p><a href="https://www.helical-ai.com/">Helical</a></p><p><a href="https://github.com/helicalAI/helical">Helical on GitHub</a></p><p><a href="https://huggingface.co/helical-ai">Helical on Hugging Face</a></p><p><a href="https://www.helical-ai.com/blog/helix-mrna-v0">Introducing Helix-mRNA-v0</a></p><p><a href="https://huggingface.co/helical-ai/helix-mRNA">Helix-mRNA</a></p><p><a href="https://doi.org/10.48550/arXiv.2502.13785">Helix-mRNA: A Hybrid Foundation Model For Full Sequence mRNA Therapeutics</a></p><p><br><strong>Resources for Computer Vision Teams:</strong></p><p><a href="https://www.linkedin.com/in/hdcouture/">LinkedIn</a> – Connect with Heather.</p><p><a href="https://pixelscientia.com/newsletter/">Computer Vision Insights Newsletter</a> – A biweekly newsletter to help bring the latest machine learning and computer vision research to applications in people and planetary health.</p><p><a href="http://pixelscientia.com/services/strategy-session/">Computer Vision Strategy Session</a> – Not sure how to advance your computer vision project? Get unstuck with a clear set of next steps. Schedule a 1 hour strategy session now to advance your project.</p>]]>
      </itunes:summary>
      <itunes:keywords>machine learning, foundation models, drug discovery, bio foundation models</itunes:keywords>
      <itunes:explicit>No</itunes:explicit>
      <podcast:transcript url="https://share.transistor.fm/s/5f4c6e90/transcript.txt" type="text/plain"/>
    </item>
    <item>
      <title>Streamlining Radiology with Junaid Kalia from NeuroCareAI</title>
      <itunes:episode>120</itunes:episode>
      <podcast:episode>120</podcast:episode>
      <itunes:title>Streamlining Radiology with Junaid Kalia from NeuroCareAI</itunes:title>
      <itunes:episodeType>full</itunes:episodeType>
      <guid isPermaLink="false">4e0a325f-0f11-4ce8-8dc0-9a39f28ad2dd</guid>
      <link>https://pixelscientia.com/podcast/streamlining-radiology-with-junaid-kalia-from-neurocareai/</link>
      <description>
        <![CDATA[<p>AI tools for healthcare are becoming more prevalent than ever before, and today, we explore how this could help usher in a future of democratized healthcare for all. I am joined by the neurocritical stroke and epilepsy specialist Junaid Kalia, MD, founder of NeuroCareAI – an innovative enterprise utilizing artificial intelligence solutions to enhance health outcomes and efficiency.</p><p>Junaid begins with his professional background and what led him to found NeuroCareAI before explaining what his company does and the products and services it offers. Then, we unpack the primary data sets that inform NeuroCareAI’s work, how to overcome the challenges of combining varied data types, the ethical responsibilities of AI, and how to ensure generalizability is upheld over long periods. To end, we learn why it’s essential to distinguish explainability from reason, how to mitigate the effects of bias on radiology data, how the regulatory process stunts the development of machine learning solutions, and Junaid’s vision of the future of NeuroCareAI. </p><p><br></p><p><strong>Key Points:</strong></p><ul><li>Junaid Kalia walks us through his professional background and why he formed NeuroCareAI.</li><li>The ins and outs of NeuroCareAI and how it incorporates AI into its products and services. </li><li>Understanding the two main forms of data that govern the company’s work. </li><li>The challenges of combining different data types and how to overcome them.  </li><li>Unpacking the ethical responsibilities of AI. </li><li>Generalizability over time: How Junaid and his team ensure their models continue to perform.</li><li>Model accuracy versus explainability, and distinguishing explainability from reason. </li><li>How bias affects models trained on radiology data and how to mitigate this. </li><li>The way the regulatory process affects the development of machine learning solutions.</li><li>Junaid Kalia’s advice for other leaders of AI-powered startups. 
</li><li>His view on the future of NeuroCareAI. </li></ul><p><br></p><p><strong>Quotes:</strong></p><p>“Coming from a very low resource country like Pakistan, I wanted to start a project in which AI can help democratize in countries with low resource settings.” — Junaid Kalia</p><p><br></p><p>“Our mission is if you save a life, it is as if you save the life of all mankind.” — Junaid Kalia</p><p><br></p><p>“When you are deploying artificial intelligence, you need to make sure that it's deployed ethically. [For] some of these things, we do expect our partner sites – [to] have a real quality assurance system in place before they can deploy my artificial intelligence, because I just want to be ethical.” — Junaid Kalia</p><p><br></p><p>“We need to differentiate [and] distinguish between reasoning and explainability. In the vision world, I believe that explainability is nice to have. In the large language models space, reasoning, in my opinion, is a must-have.” — Junaid Kalia</p><p><br></p><p><strong>Links:</strong></p><p><a href="https://www.linkedin.com/in/junaidkaliamd/">Junaid Kalia on LinkedIn</a></p><p><a href="https://x.com/junaidkaliamd">Junaid Kalia on X</a><br><a href="https://neurocare.ai/">NeuroCareAI</a></p><p><br><strong>Resources for Computer Vision Teams:</strong></p><p><a href="https://www.linkedin.com/in/hdcouture/">LinkedIn</a> – Connect with Heather.</p><p><a href="https://pixelscientia.com/newsletter/">Computer Vision Insights Newsletter</a> – A biweekly newsletter to help bring the latest machine learning and computer vision research to applications in people and planetary health.</p><p><a href="http://pixelscientia.com/services/strategy-session/">Computer Vision Strategy Session</a> – Not sure how to advance your computer vision project? Get unstuck with a clear set of next steps. Schedule a 1 hour strategy session now to advance your project.</p>]]>
      </description>
      <content:encoded>
        <![CDATA[<p>AI tools for healthcare are becoming more prevalent than ever before, and today, we explore how this could help usher in a future of democratized healthcare for all. I am joined by the neurocritical stroke and epilepsy specialist Junaid Kalia, MD, founder of NeuroCareAI – an innovative enterprise utilizing artificial intelligence solutions to enhance health outcomes and efficiency.</p><p>Junaid begins with his professional background and what led him to found NeuroCareAI before explaining what his company does and the products and services it offers. Then, we unpack the primary data sets that inform NeuroCareAI’s work, how to overcome the challenges of combining varied data types, the ethical responsibilities of AI, and how to ensure generalizability is upheld over long periods. To end, we learn why it’s essential to distinguish explainability from reason, how to mitigate the effects of bias on radiology data, how the regulatory process stunts the development of machine learning solutions, and Junaid’s vision of the future of NeuroCareAI. </p><p><br></p><p><strong>Key Points:</strong></p><ul><li>Junaid Kalia walks us through his professional background and why he formed NeuroCareAI.</li><li>The ins and outs of NeuroCareAI and how it incorporates AI into its products and services. </li><li>Understanding the two main forms of data that govern the company’s work. </li><li>The challenges of combining different data types and how to overcome them.  </li><li>Unpacking the ethical responsibilities of AI. </li><li>Generalizability over time: How Junaid and his team ensure their models continue to perform.</li><li>Model accuracy versus explainability, and distinguishing explainability from reason. </li><li>How bias affects models trained on radiology data and how to mitigate this. </li><li>The way the regulatory process affects the development of machine learning solutions.</li><li>Junaid Kalia’s advice for other leaders of AI-powered startups. 
</li><li>His view on the future of NeuroCareAI. </li></ul><p><br></p><p><strong>Quotes:</strong></p><p>“Coming from a very low resource country like Pakistan, I wanted to start a project in which AI can help democratize in countries with low resource settings.” — Junaid Kalia</p><p><br></p><p>“Our mission is if you save a life, it is as if you save the life of all mankind.” — Junaid Kalia</p><p><br></p><p>“When you are deploying artificial intelligence, you need to make sure that it's deployed ethically. [For] some of these things, we do expect our partner sites – [to] have a real quality assurance system in place before they can deploy my artificial intelligence, because I just want to be ethical.” — Junaid Kalia</p><p><br></p><p>“We need to differentiate [and] distinguish between reasoning and explainability. In the vision world, I believe that explainability is nice to have. In the large language models space, reasoning, in my opinion, is a must-have.” — Junaid Kalia</p><p><br></p><p><strong>Links:</strong></p><p><a href="https://www.linkedin.com/in/junaidkaliamd/">Junaid Kalia on LinkedIn</a></p><p><a href="https://x.com/junaidkaliamd">Junaid Kalia on X</a><br><a href="https://neurocare.ai/">NeuroCareAI</a></p><p><br><strong>Resources for Computer Vision Teams:</strong></p><p><a href="https://www.linkedin.com/in/hdcouture/">LinkedIn</a> – Connect with Heather.</p><p><a href="https://pixelscientia.com/newsletter/">Computer Vision Insights Newsletter</a> – A biweekly newsletter to help bring the latest machine learning and computer vision research to applications in people and planetary health.</p><p><a href="http://pixelscientia.com/services/strategy-session/">Computer Vision Strategy Session</a> – Not sure how to advance your computer vision project? Get unstuck with a clear set of next steps. Schedule a 1 hour strategy session now to advance your project.</p>]]>
      </content:encoded>
      <pubDate>Mon, 24 Mar 2025 06:00:00 -0400</pubDate>
      <author>Heather D. Couture</author>
      <enclosure url="https://2.gum.fm/op3.dev/e/pdcn.co/e/pdst.fm/e/dts.podtrac.com/redirect.mp3/media.transistor.fm/48294fb0/a2ff39b5.mp3" length="21690853" type="audio/mpeg"/>
      <itunes:author>Heather D. Couture</itunes:author>
      <itunes:image href="https://img.transistor.fm/Jy_me00l2pC0Xps5okeH5S3rluhKY4-xb609M4lgIXw/rs:fill:0:0:1/w:1400/h:1400/q:60/mb:500000/aHR0cHM6Ly9pbWct/dXBsb2FkLXByb2R1/Y3Rpb24udHJhbnNp/c3Rvci5mbS9lMWIz/ZmZmZWNiOGQ4M2Qx/ZjI2ZWIxMzFiNWVl/NDc4OC5qcGVn.jpg"/>
      <itunes:duration>1349</itunes:duration>
      <itunes:summary>
        <![CDATA[<p>AI tools for healthcare are becoming more prevalent than ever before, and today, we explore how this could help usher in a future of democratized healthcare for all. I am joined by the neurocritical stroke and epilepsy specialist Junaid Kalia, MD, founder of NeuroCareAI – an innovative enterprise utilizing artificial intelligence solutions to enhance health outcomes and efficiency.</p><p>Junaid begins with his professional background and what led him to found NeuroCareAI before explaining what his company does and the products and services it offers. Then, we unpack the primary data sets that inform NeuroCareAI’s work, how to overcome the challenges of combining varied data types, the ethical responsibilities of AI, and how to ensure generalizability is upheld over long periods. To end, we learn why it’s essential to distinguish explainability from reason, how to mitigate the effects of bias on radiology data, how the regulatory process stunts the development of machine learning solutions, and Junaid’s vision of the future of NeuroCareAI. </p><p><br></p><p><strong>Key Points:</strong></p><ul><li>Junaid Kalia walks us through his professional background and why he formed NeuroCareAI.</li><li>The ins and outs of NeuroCareAI and how it incorporates AI into its products and services. </li><li>Understanding the two main forms of data that govern the company’s work. </li><li>The challenges of combining different data types and how to overcome them.  </li><li>Unpacking the ethical responsibilities of AI. </li><li>Generalizability over time: How Junaid and his team ensure their models continue to perform.</li><li>Model accuracy versus explainability, and distinguishing explainability from reason. </li><li>How bias affects models trained on radiology data and how to mitigate this. </li><li>The way the regulatory process affects the development of machine learning solutions.</li><li>Junaid Kalia’s advice for other leaders of AI-powered startups. 
</li><li>His view on the future of NeuroCareAI. </li></ul><p><br></p><p><strong>Quotes:</strong></p><p>“Coming from a very low resource country like Pakistan, I wanted to start a project in which AI can help democratize in countries with low resource settings.” — Junaid Kalia</p><p><br></p><p>“Our mission is if you save a life, it is as if you save the life of all mankind.” — Junaid Kalia</p><p><br></p><p>“When you are deploying artificial intelligence, you need to make sure that it's deployed ethically. [For] some of these things, we do expect our partner sites – [to] have a real quality assurance system in place before they can deploy my artificial intelligence, because I just want to be ethical.” — Junaid Kalia</p><p><br></p><p>“We need to differentiate [and] distinguish between reasoning and explainability. In the vision world, I believe that explainability is nice to have. In the large language models space, reasoning, in my opinion, is a must-have.” — Junaid Kalia</p><p><br></p><p><strong>Links:</strong></p><p><a href="https://www.linkedin.com/in/junaidkaliamd/">Junaid Kalia on LinkedIn</a></p><p><a href="https://x.com/junaidkaliamd">Junaid Kalia on X</a><br><a href="https://neurocare.ai/">NeuroCareAI</a></p><p><br><strong>Resources for Computer Vision Teams:</strong></p><p><a href="https://www.linkedin.com/in/hdcouture/">LinkedIn</a> – Connect with Heather.</p><p><a href="https://pixelscientia.com/newsletter/">Computer Vision Insights Newsletter</a> – A biweekly newsletter to help bring the latest machine learning and computer vision research to applications in people and planetary health.</p><p><a href="http://pixelscientia.com/services/strategy-session/">Computer Vision Strategy Session</a> – Not sure how to advance your computer vision project? Get unstuck with a clear set of next steps. Schedule a 1 hour strategy session now to advance your project.</p>]]>
      </itunes:summary>
      <itunes:keywords>machine learning, radiology</itunes:keywords>
      <itunes:explicit>No</itunes:explicit>
      <podcast:transcript url="https://share.transistor.fm/s/48294fb0/transcript.txt" type="text/plain"/>
    </item>
    <item>
      <title>Foundation Model Series: Advancing Precision Medicine in Radiology with Paul Hérent from Raidium</title>
      <itunes:episode>119</itunes:episode>
      <podcast:episode>119</podcast:episode>
      <itunes:title>Foundation Model Series: Advancing Precision Medicine in Radiology with Paul Hérent from Raidium</itunes:title>
      <itunes:episodeType>full</itunes:episodeType>
      <guid isPermaLink="false">f6c4951d-6dce-470e-9291-547d7f23dd76</guid>
      <link>https://pixelscientia.com/podcast/advancing-precision-medicine-in-radiology-with-paul-herent-from-raidium/</link>
      <description>
        <![CDATA[<p>Radiologists face a growing demand for imaging analysis, yet existing AI tools remain fragmented, each solving only a small part of the workflow. Today, we continue our series on domain-specific foundation models with Paul Hérent, Co-Founder and CEO of Raidium. He joins us to discuss how foundation models could revolutionize radiology by providing a single AI-powered solution for multiple imaging modalities.</p><p>Paul shares his journey from radiologist to AI entrepreneur, explaining how his background in cognitive science and medical imaging led him to co-found Raidium. He breaks down the challenges of building a foundation model for radiology, from handling massive datasets to addressing bias and regulatory hurdles, and their approach at Raidium. We also explore Raidium’s vision for the future: its plans to refine multimodal AI, expand its applications beyond radiology, and commercialize its technology to improve patient care worldwide. Tune in to learn how foundation models could shape the future of radiology, enhance patient care, and expand global access to medical imaging!</p><p><br></p><p><strong>Key Points:</strong></p><ul><li>Paul Hérent’s background in radiology, cognitive science, and founding Raidium.</li><li>Why existing AI tools in radiology are fragmented and have limited adoption.</li><li>How Raidium’s foundation model unifies multiple radiology tasks.</li><li>Raidium’s multimodal AI: handling diverse imaging types in one system.</li><li>Outlining the vast, diverse data used to train Raidium’s model, including radiology reports.</li><li>The teams, compute power, and infrastructure behind Raidium’s AI development.</li><li>Challenges in data curation, regulatory hurdles, and proving clinical value.</li><li>What makes a good foundation model and the role of self-supervised learning (SSL).</li><li>Insights into how Raidium benchmarks its model using rigorous medical imaging tests.</li><li>The role of diverse data, human oversight, 
and continuous learning in reducing bias.</li><li>Their current R&amp;D phase and plans for commercialization.</li><li>Key lessons Paul learned about AI startups, from data needs to product-market fit.</li><li>The future of foundation models in radiology and beyond.</li><li>Paul’s advice to AI founders: Build a team with both AI and domain expertise.</li><li>Raidium’s vision: Improving the lives of patients and global healthcare access.</li></ul><p><br></p><p><strong>Quotes:</strong></p><p>“In practice, there is still little AI adoption because every solution solves only a tiny part of what radiologists do. [For radiologists] it's a wider job. We want, as a radiologist, to have one tool to rule all modalities.” — Paul Hérent<strong><br></strong><br></p><p>“Data is key. If you have good data, not only to build a data set, but proprietary data, challenging data, rare data in a specific domain. It's very valuable because the architecture is not particularly innovative.” — Paul Hérent</p><p><br></p><p>“Build a team with people you trust. Entrepreneurship is not trivial. Be complementary.” — Paul Hérent</p><p><br></p><p>“The dream of Raidium is to build something that has a huge impact on a patient's life.” — Paul Hérent<strong><br></strong><br></p><p>“If we go beyond the rich countries, many, many people have no access to radiology. Two-thirds of countries don’t have access to radiologists. It's a big need. 
If we can contribute with our approach to more accessible health, we will be very happy.” — Paul Hérent</p><p><br></p><p><strong>Links:</strong></p><p><a href="https://www.linkedin.com/in/paul-herent-m-d-msc-51b371160/?originalSubdomain=fr">Paul Hérent on LinkedIn</a></p><p><a href="https://scholar.google.com/citations?user=ZS9f4Q0AAAAJ">Paul Hérent on Google Scholar</a></p><p><a href="https://www.raidium.eu/">Raidium</a></p><p><br><strong>Resources for Computer Vision Teams:</strong></p><p><a href="https://www.linkedin.com/in/hdcouture/">LinkedIn</a> – Connect with Heather.</p><p><a href="https://pixelscientia.com/newsletter/">Computer Vision Insights Newsletter</a> – A biweekly newsletter to help bring the latest machine learning and computer vision research to applications in people and planetary health.</p><p><a href="http://pixelscientia.com/services/strategy-session/">Computer Vision Strategy Session</a> – Not sure how to advance your computer vision project? Get unstuck with a clear set of next steps. Schedule a 1 hour strategy session now to advance your project.</p>]]>
      </description>
      <content:encoded>
        <![CDATA[<p>Radiologists face a growing demand for imaging analysis, yet existing AI tools remain fragmented, each solving only a small part of the workflow. Today, we continue our series on domain-specific foundation models with Paul Hérent, Co-Founder and CEO of Raidium. He joins us to discuss how foundation models could revolutionize radiology by providing a single AI-powered solution for multiple imaging modalities.</p><p>Paul shares his journey from radiologist to AI entrepreneur, explaining how his background in cognitive science and medical imaging led him to co-found Raidium. He breaks down the challenges of building a foundation model for radiology, from handling massive datasets to addressing bias and regulatory hurdles, and their approach at Raidium. We also explore Raidium’s vision for the future: its plans to refine multimodal AI, expand its applications beyond radiology, and commercialize its technology to improve patient care worldwide. Tune in to learn how foundation models could shape the future of radiology, enhance patient care, and expand global access to medical imaging!</p><p><br></p><p><strong>Key Points:</strong></p><ul><li>Paul Hérent’s background in radiology, cognitive science, and founding Raidium.</li><li>Why existing AI tools in radiology are fragmented and have limited adoption.</li><li>How Raidium’s foundation model unifies multiple radiology tasks.</li><li>Raidium’s multimodal AI: handling diverse imaging types in one system.</li><li>Outlining the vast, diverse data used to train Raidium’s model, including radiology reports.</li><li>The teams, compute power, and infrastructure behind Raidium’s AI development.</li><li>Challenges in data curation, regulatory hurdles, and proving clinical value.</li><li>What makes a good foundation model and the role of self-supervised learning (SSL).</li><li>Insights into how Raidium benchmarks its model using rigorous medical imaging tests.</li><li>The role of diverse data, human oversight, 
and continuous learning in reducing bias.</li><li>Their current R&amp;D phase and plans for commercialization.</li><li>Key lessons Paul learned about AI startups, from data needs to product-market fit.</li><li>The future of foundation models in radiology and beyond.</li><li>Paul’s advice to AI founders: Build a team with both AI and domain expertise.</li><li>Raidium’s vision: Improving the lives of patients and global healthcare access.</li></ul><p><br></p><p><strong>Quotes:</strong></p><p>“In practice, there is still little AI adoption because every solution solves only a tiny part of what radiologists do. [For radiologists] it's a wider job. We want, as a radiologist, to have one tool to rule all modalities.” — Paul Hérent<strong><br></strong><br></p><p>“Data is key. If you have good data, not only to build a data set, but proprietary data, challenging data, rare data in a specific domain. It's very valuable because the architecture is not particularly innovative.” — Paul Hérent</p><p><br></p><p>“Build a team with people you trust. Entrepreneurship is not trivial. Be complementary.” — Paul Hérent</p><p><br></p><p>“The dream of Raidium is to build something that has a huge impact on a patient's life.” — Paul Hérent<strong><br></strong><br></p><p>“If we go beyond the rich countries, many, many people have no access to radiology. Two-thirds of countries don’t have access to radiologists. It's a big need. 
If we can contribute with our approach to more accessible health, we will be very happy.” — Paul Hérent</p><p><br></p><p><strong>Links:</strong></p><p><a href="https://www.linkedin.com/in/paul-herent-m-d-msc-51b371160/?originalSubdomain=fr">Paul Hérent on LinkedIn</a></p><p><a href="https://scholar.google.com/citations?user=ZS9f4Q0AAAAJ">Paul Hérent on Google Scholar</a></p><p><a href="https://www.raidium.eu/">Raidium</a></p><p><br><strong>Resources for Computer Vision Teams:</strong></p><p><a href="https://www.linkedin.com/in/hdcouture/">LinkedIn</a> – Connect with Heather.</p><p><a href="https://pixelscientia.com/newsletter/">Computer Vision Insights Newsletter</a> – A biweekly newsletter to help bring the latest machine learning and computer vision research to applications in people and planetary health.</p><p><a href="http://pixelscientia.com/services/strategy-session/">Computer Vision Strategy Session</a> – Not sure how to advance your computer vision project? Get unstuck with a clear set of next steps. Schedule a 1 hour strategy session now to advance your project.</p>]]>
      </content:encoded>
      <pubDate>Mon, 03 Mar 2025 06:00:00 -0500</pubDate>
      <author>Heather D. Couture</author>
      <enclosure url="https://2.gum.fm/op3.dev/e/pdcn.co/e/pdst.fm/e/dts.podtrac.com/redirect.mp3/media.transistor.fm/7576235a/cc0463f9.mp3" length="21883932" type="audio/mpeg"/>
      <itunes:author>Heather D. Couture</itunes:author>
      <itunes:image href="https://img.transistor.fm/GLSbsXYQ5AyEMjSRoQvxrWUBrvNOF9B3fu_Fk6So9jc/rs:fill:0:0:1/w:1400/h:1400/q:60/mb:500000/aHR0cHM6Ly9pbWct/dXBsb2FkLXByb2R1/Y3Rpb24udHJhbnNp/c3Rvci5mbS84N2M4/MjRlNjQ5YTlmNzhi/ZGFkNjRjYjEyZDVl/YTQ5Zi5qcGVn.jpg"/>
      <itunes:duration>1356</itunes:duration>
      <itunes:summary>
        <![CDATA[<p>Radiologists face a growing demand for imaging analysis, yet existing AI tools remain fragmented, each solving only a small part of the workflow. Today, we continue our series on domain-specific foundation models with Paul Hérent, Co-Founder and CEO of Raidium. He joins us to discuss how foundation models could revolutionize radiology by providing a single AI-powered solution for multiple imaging modalities.</p><p>Paul shares his journey from radiologist to AI entrepreneur, explaining how his background in cognitive science and medical imaging led him to co-found Raidium. He breaks down the challenges of building a foundation model for radiology, from handling massive datasets to addressing bias and regulatory hurdles, and their approach at Raidium. We also explore Raidium’s vision for the future: its plans to refine multimodal AI, expand its applications beyond radiology, and commercialize its technology to improve patient care worldwide. Tune in to learn how foundation models could shape the future of radiology, enhance patient care, and expand global access to medical imaging!</p><p><br></p><p><strong>Key Points:</strong></p><ul><li>Paul Hérent’s background in radiology, cognitive science, and founding Raidium.</li><li>Why existing AI tools in radiology are fragmented and have limited adoption.</li><li>How Raidium’s foundation model unifies multiple radiology tasks.</li><li>Raidium’s multimodal AI: handling diverse imaging types in one system.</li><li>Outlining the vast, diverse data used to train Raidium’s model, including radiology reports.</li><li>The teams, compute power, and infrastructure behind Raidium’s AI development.</li><li>Challenges in data curation, regulatory hurdles, and proving clinical value.</li><li>What makes a good foundation model and the role of self-supervised learning (SSL).</li><li>Insights into how Raidium benchmarks its model using rigorous medical imaging tests.</li><li>The role of diverse data, human oversight, 
and continuous learning in reducing bias.</li><li>Their current R&amp;D phase and plans for commercialization.</li><li>Key lessons Paul learned about AI startups, from data needs to product-market fit.</li><li>The future of foundation models in radiology and beyond.</li><li>Paul’s advice to AI founders: Build a team with both AI and domain expertise.</li><li>Raidium’s vision: Improving the lives of patients and global healthcare access.</li></ul><p><br></p><p><strong>Quotes:</strong></p><p>“In practice, there is still little AI adoption because every solution solves only a tiny part of what radiologists do. [For radiologists] it's a wider job. We want, as a radiologist, to have one tool to rule all modalities.” — Paul Hérent<strong><br></strong><br></p><p>“Data is key. If you have good data, not only to build a data set, but proprietary data, challenging data, rare data in a specific domain. It's very valuable because the architecture is not particularly innovative.” — Paul Hérent</p><p><br></p><p>“Build a team with people you trust. Entrepreneurship is not trivial. Be complementary.” — Paul Hérent</p><p><br></p><p>“The dream of Raidium is to build something that has a huge impact on a patient's life.” — Paul Hérent<strong><br></strong><br></p><p>“If we go beyond the rich countries, many, many people have no access to radiology. Two-thirds of countries don’t have access to radiologists. It's a big need. 
If we can contribute with our approach to more accessible health, we will be very happy.” — Paul Hérent</p><p><br></p><p><strong>Links:</strong></p><p><a href="https://www.linkedin.com/in/paul-herent-m-d-msc-51b371160/?originalSubdomain=fr">Paul Hérent on LinkedIn</a></p><p><a href="https://scholar.google.com/citations?user=ZS9f4Q0AAAAJ">Paul Hérent on Google Scholar</a></p><p><a href="https://www.raidium.eu/">Raidium</a></p><p><br><strong>Resources for Computer Vision Teams:</strong></p><p><a href="https://www.linkedin.com/in/hdcouture/">LinkedIn</a> – Connect with Heather.</p><p><a href="https://pixelscientia.com/newsletter/">Computer Vision Insights Newsletter</a> – A biweekly newsletter to help bring the latest machine learning and computer vision research to applications in people and planetary health.</p><p><a href="http://pixelscientia.com/services/strategy-session/">Computer Vision Strategy Session</a> – Not sure how to advance your computer vision project? Get unstuck with a clear set of next steps. Schedule a 1 hour strategy session now to advance your project.</p>]]>
      </itunes:summary>
      <itunes:keywords>machine learning, radiology, precision medicine, foundation models</itunes:keywords>
      <itunes:explicit>No</itunes:explicit>
      <podcast:transcript url="https://share.transistor.fm/s/7576235a/transcript.txt" type="text/plain"/>
    </item>
    <item>
      <title>Foundation Model Series: Advancing Endoscopy with Matt Schwartz from Virgo</title>
      <itunes:episode>118</itunes:episode>
      <podcast:episode>118</podcast:episode>
      <itunes:title>Foundation Model Series: Advancing Endoscopy with Matt Schwartz from Virgo</itunes:title>
      <itunes:episodeType>full</itunes:episodeType>
      <guid isPermaLink="false">0385a3ed-979d-4761-becb-676c51529918</guid>
      <link>https://pixelscientia.com/podcast/advancing-endoscopy-with-matt-schwartz-from-virgo/</link>
      <description>
        <![CDATA[<p>What if a routine endoscopy could do more than just detect disease by actually predicting treatment outcomes and revolutionizing precision medicine? In this episode of Impact AI, Matt Schwartz, CEO and Co-Founder of endoscopy video management and AI analysis platform Virgo, discusses how AI and machine learning are transforming endoscopy.</p><p>Tuning in, you’ll learn how Virgo’s foundation model, EndoDINO, trained on the largest endoscopic video dataset in the world, is unlocking new possibilities in gastroenterology. Matt also shares how automated video capture, AI-powered diagnostics, and predictive analytics are reshaping patient care, with a particular focus on improving treatment for inflammatory bowel disease (IBD). Join us to discover how domain-specific foundation models are redefining healthcare and what this means for the future of precision medicine!</p><p><br></p><p><strong>Key Points:</strong></p><ul><li>An introduction to Matt Schwartz and Virgo’s mission.</li><li>The importance of video documentation in endoscopy and its impact on healthcare.</li><li>Machine learning’s role in automating endoscopic video capture and clinical trial recruitment.</li><li>Building the EndoDINO foundation model to unlock endoscopy data for precision medicine.</li><li>Data collection: the process of gathering 130,000+ procedure videos for model training.</li><li>Foundation model development using self-supervised learning and DINOv2.</li><li>Model development challenges, from hyper-parameter tuning to domain-specific adjustments.</li><li>Applying EndoDINO to predict inflammatory bowel disease (IBD) treatment responses.</li><li>Commercializing EndoDINO through licensing to health systems and pharma companies.</li><li>The future of foundation models in endoscopy: expanding applications beyond GI diseases.</li><li>Advice for AI startup founders to prioritize data capture as a foundation for AI success.</li><li>Insight into Virgo’s vision to transform IBD 
treatment and preventative care.</li></ul><p><br></p><p><strong>Quotes:</strong></p><p>“There's a massive amount of endoscopic video data being generated across a wide range of endoscopic procedures, and nobody was capturing that data – [Virgo] realized early on that endoscopy data could hold the key to unlocking all sorts of opportunities in precision medicine.” — Matt Schwartz</p><p><br></p><p>“With the foundation model paradigm, you can compress a lot of heavy compute needs into a single model and then build different applications on top of the foundation. This is going to have a positive impact on the clinical deployment of foundation models.” — Matt Schwartz</p><p><br></p><p>“Our foundation model can turn something like a routine colonoscopy into a precision medicine screening tool for IBD patients.” — Matt Schwartz</p><p><br></p><p>“There are a lot of untapped data resources in healthcare. If a founder can build a first product that is the data capture engine, it will set them up for a ton of future success when it comes to AI development.” — Matt Schwartz</p><p><br></p><p><strong>Links:</strong></p><p><a href="https://virgosvs.com/">Virgo</a></p><p><a href="https://www.linkedin.com/in/mzschwartz88/">Matt Schwartz on LinkedIn</a></p><p><a href="https://x.com/MattZschwartz">Matt Schwartz on X</a></p><p><a href="https://www.endoml.ai/">EndoML</a></p><p><a href="https://virgosvs.com/blog/introducing-endodino-a-breakthrough-in-endoscopic-ai/">Introducing EndoDINO: A Breakthrough in Endoscopic AI</a></p><p><br><strong>Resources for Computer Vision Teams:</strong></p><p><a href="https://www.linkedin.com/in/hdcouture/">LinkedIn</a> – Connect with Heather.</p><p><a href="https://pixelscientia.com/newsletter/">Computer Vision Insights Newsletter</a> – A biweekly newsletter to help bring the latest machine learning and computer vision research to applications in people and planetary health.</p><p><a href="http://pixelscientia.com/services/strategy-session/">Computer 
Vision Strategy Session</a> – Not sure how to advance your computer vision project? Get unstuck with a clear set of next steps. Schedule a 1 hour strategy session now to advance your project.</p>]]>
      </description>
      <content:encoded>
        <![CDATA[<p>What if a routine endoscopy could do more than just detect disease by actually predicting treatment outcomes and revolutionizing precision medicine? In this episode of Impact AI, Matt Schwartz, CEO and Co-Founder of endoscopy video management and AI analysis platform Virgo, discusses how AI and machine learning are transforming endoscopy.</p><p>Tuning in, you’ll learn how Virgo’s foundation model, EndoDINO, trained on the largest endoscopic video dataset in the world, is unlocking new possibilities in gastroenterology. Matt also shares how automated video capture, AI-powered diagnostics, and predictive analytics are reshaping patient care, with a particular focus on improving treatment for inflammatory bowel disease (IBD). Join us to discover how domain-specific foundation models are redefining healthcare and what this means for the future of precision medicine!</p><p><br></p><p><strong>Key Points:</strong></p><ul><li>An introduction to Matt Schwartz and Virgo’s mission.</li><li>The importance of video documentation in endoscopy and its impact on healthcare.</li><li>Machine learning’s role in automating endoscopic video capture and clinical trial recruitment.</li><li>Building the EndoDINO foundation model to unlock endoscopy data for precision medicine.</li><li>Data collection: the process of gathering 130,000+ procedure videos for model training.</li><li>Foundation model development using self-supervised learning and DINOv2.</li><li>Model development challenges, from hyper-parameter tuning to domain-specific adjustments.</li><li>Applying EndoDINO to predict inflammatory bowel disease (IBD) treatment responses.</li><li>Commercializing EndoDINO through licensing to health systems and pharma companies.</li><li>The future of foundation models in endoscopy: expanding applications beyond GI diseases.</li><li>Advice for AI startup founders to prioritize data capture as a foundation for AI success.</li><li>Insight into Virgo’s vision to transform IBD 
treatment and preventative care.</li></ul><p><br></p><p><strong>Quotes:</strong></p><p>“There's a massive amount of endoscopic video data being generated across a wide range of endoscopic procedures, and nobody was capturing that data – [Virgo] realized early on that endoscopy data could hold the key to unlocking all sorts of opportunities in precision medicine.” — Matt Schwartz</p><p><br></p><p>“With the foundation model paradigm, you can compress a lot of heavy compute needs into a single model and then build different applications on top of the foundation. This is going to have a positive impact on the clinical deployment of foundation models.” — Matt Schwartz</p><p><br></p><p>“Our foundation model can turn something like a routine colonoscopy into a precision medicine screening tool for IBD patients.” — Matt Schwartz</p><p><br></p><p>“There are a lot of untapped data resources in healthcare. If a founder can build a first product that is the data capture engine, it will set them up for a ton of future success when it comes to AI development.” — Matt Schwartz</p><p><br></p><p><strong>Links:</strong></p><p><a href="https://virgosvs.com/">Virgo</a></p><p><a href="https://www.linkedin.com/in/mzschwartz88/">Matt Schwartz on LinkedIn</a></p><p><a href="https://x.com/MattZschwartz">Matt Schwartz on X</a></p><p><a href="https://www.endoml.ai/">EndoML</a></p><p><a href="https://virgosvs.com/blog/introducing-endodino-a-breakthrough-in-endoscopic-ai/">Introducing EndoDINO: A Breakthrough in Endoscopic AI</a></p><p><br><strong>Resources for Computer Vision Teams:</strong></p><p><a href="https://www.linkedin.com/in/hdcouture/">LinkedIn</a> – Connect with Heather.</p><p><a href="https://pixelscientia.com/newsletter/">Computer Vision Insights Newsletter</a> – A biweekly newsletter to help bring the latest machine learning and computer vision research to applications in people and planetary health.</p><p><a href="http://pixelscientia.com/services/strategy-session/">Computer 
Vision Strategy Session</a> – Not sure how to advance your computer vision project? Get unstuck with a clear set of next steps. Schedule a 1 hour strategy session now to advance your project.</p>]]>
      </content:encoded>
      <pubDate>Mon, 24 Feb 2025 06:00:00 -0500</pubDate>
      <author>Heather D. Couture</author>
      <enclosure url="https://2.gum.fm/op3.dev/e/pdcn.co/e/pdst.fm/e/dts.podtrac.com/redirect.mp3/media.transistor.fm/6dbb7e3d/40bc1dba.mp3" length="30423220" type="audio/mpeg"/>
      <itunes:author>Heather D. Couture</itunes:author>
      <itunes:image href="https://img.transistor.fm/PNe05_X-JODstviIkM108kW2CY2ippBDpiQ4xHwj5Vo/rs:fill:0:0:1/w:1400/h:1400/q:60/mb:500000/aHR0cHM6Ly9pbWct/dXBsb2FkLXByb2R1/Y3Rpb24udHJhbnNp/c3Rvci5mbS9mZTFj/Yjg4ZmQ5ZmVjNWNl/MmRmNGQ2ZDYxYThm/NmI3Zi5qcGVn.jpg"/>
      <itunes:duration>1264</itunes:duration>
      <itunes:summary>
        <![CDATA[<p>What if a routine endoscopy could do more than just detect disease by actually predicting treatment outcomes and revolutionizing precision medicine? In this episode of Impact AI, Matt Schwartz, CEO and Co-Founder of endoscopy video management and AI analysis platform Virgo, discusses how AI and machine learning are transforming endoscopy.</p><p>Tuning in, you’ll learn how Virgo’s foundation model, EndoDINO, trained on the largest endoscopic video dataset in the world, is unlocking new possibilities in gastroenterology. Matt also shares how automated video capture, AI-powered diagnostics, and predictive analytics are reshaping patient care, with a particular focus on improving treatment for inflammatory bowel disease (IBD). Join us to discover how domain-specific foundation models are redefining healthcare and what this means for the future of precision medicine!</p><p><br></p><p><strong>Key Points:</strong></p><ul><li>An introduction to Matt Schwartz and Virgo’s mission.</li><li>The importance of video documentation in endoscopy and its impact on healthcare.</li><li>Machine learning’s role in automating endoscopic video capture and clinical trial recruitment.</li><li>Building the EndoDINO foundation model to unlock endoscopy data for precision medicine.</li><li>Data collection: the process of gathering 130,000+ procedure videos for model training.</li><li>Foundation model development using self-supervised learning and DINOv2.</li><li>Model development challenges, from hyper-parameter tuning to domain-specific adjustments.</li><li>Applying EndoDINO to predict inflammatory bowel disease (IBD) treatment responses.</li><li>Commercializing EndoDINO through licensing to health systems and pharma companies.</li><li>The future of foundation models in endoscopy: expanding applications beyond GI diseases.</li><li>Advice for AI startup founders to prioritize data capture as a foundation for AI success.</li><li>Insight into Virgo’s vision to transform IBD 
treatment and preventative care.</li></ul><p><br></p><p><strong>Quotes:</strong></p><p>“There's a massive amount of endoscopic video data being generated across a wide range of endoscopic procedures, and nobody was capturing that data – [Virgo] realized early on that endoscopy data could hold the key to unlocking all sorts of opportunities in precision medicine.” — Matt Schwartz</p><p><br></p><p>“With the foundation model paradigm, you can compress a lot of heavy compute needs into a single model and then build different applications on top of the foundation. This is going to have a positive impact on the clinical deployment of foundation models.” — Matt Schwartz</p><p><br></p><p>“Our foundation model can turn something like a routine colonoscopy into a precision medicine screening tool for IBD patients.” — Matt Schwartz</p><p><br></p><p>“There are a lot of untapped data resources in healthcare. If a founder can build a first product that is the data capture engine, it will set them up for a ton of future success when it comes to AI development.” — Matt Schwartz</p><p><br></p><p><strong>Links:</strong></p><p><a href="https://virgosvs.com/">Virgo</a></p><p><a href="https://www.linkedin.com/in/mzschwartz88/">Matt Schwartz on LinkedIn</a></p><p><a href="https://x.com/MattZschwartz">Matt Schwartz on X</a></p><p><a href="https://www.endoml.ai/">EndoML</a></p><p><a href="https://virgosvs.com/blog/introducing-endodino-a-breakthrough-in-endoscopic-ai/">Introducing EndoDINO: A Breakthrough in Endoscopic AI</a></p><p><br><strong>Resources for Computer Vision Teams:</strong></p><p><a href="https://www.linkedin.com/in/hdcouture/">LinkedIn</a> – Connect with Heather.</p><p><a href="https://pixelscientia.com/newsletter/">Computer Vision Insights Newsletter</a> – A biweekly newsletter to help bring the latest machine learning and computer vision research to applications in people and planetary health.</p><p><a href="http://pixelscientia.com/services/strategy-session/">Computer 
Vision Strategy Session</a> – Not sure how to advance your computer vision project? Get unstuck with a clear set of next steps. Schedule a 1 hour strategy session now to advance your project.</p>]]>
      </itunes:summary>
      <itunes:keywords>machine learning, endoscopy, foundation model</itunes:keywords>
      <itunes:explicit>No</itunes:explicit>
      <podcast:transcript url="https://share.transistor.fm/s/6dbb7e3d/transcript.txt" type="text/plain"/>
    </item>
    <item>
      <title>Foundation Model Series: Transforming Biology with Zelda Mariet from Bioptimus</title>
      <itunes:episode>117</itunes:episode>
      <podcast:episode>117</podcast:episode>
      <itunes:title>Foundation Model Series: Transforming Biology with Zelda Mariet from Bioptimus</itunes:title>
      <itunes:episodeType>full</itunes:episodeType>
      <guid isPermaLink="false">3fbf3d5b-6026-49d3-a61e-2894c74a1ce9</guid>
      <link>https://pixelscientia.com/podcast/transforming-biology-with-zelda-mariet-from-bioptimus/</link>
      <description>
        <![CDATA[<p>Zelda Mariet, Co-Founder and Principal Research Scientist at Bioptimus, joins me to continue our series of conversations on the vast possibilities and diverse applications of foundation models. Today’s discussion focuses on how foundation models are transforming biology. Zelda shares insights into Bioptimus’ work and why it’s so critical in this field. She breaks down the three core components involved in building these models and explains what sets their histopathology model apart from the many others being published today. They also explore the methodology for properly benchmarking the quality and performance of foundation models, Bioptimus’ strategy for commercializing its technology, and much more. To learn more about Bioptimus, their plans beyond pathology, and the impact they hope to make in the next three to five years, tune in now.</p><p><br></p><p><strong>Key Points:</strong></p><ul><li>Who is Zelda Mariet and what led her to create Bioptimus. </li><li>What Bioptimus does and why it’s so important.</li><li>Why their first model announced was for pathology.</li><li>Zelda breaks down three core components that go into building a foundation model.</li><li>How their histopathology foundation model is different from the number of other models published at this point.</li><li>Their methodology behind properly benchmarking how well their foundation model performs.</li><li>Different challenges they’ve encountered on their foundation model journey.</li><li>How they plan to commercialize their technology at Bioptimus. </li><li>Thoughts on whether open source is part of their long-term strategy for the model, and why.  </li><li>Developing a product roadmap for a foundation model.</li><li>She shares some information regarding their next step, beyond pathology, at Bioptimus.</li><li>The importance of understanding what kind of structure you want to capture in your data.</li><li>Where she sees the impact of Bioptimus in the next three to five years. 
</li></ul><p><br></p><p><strong>Quotes:</strong></p><p>“Working on biological data became a little bit of a fascination of mine because I was so instinctively annoyed at how hard it was to do.” — Zelda Mariet</p><p><br></p><p>“Bioptimus is building foundation models for biology. Foundation models are essentially machine learning models that take an extremely long time to train [and] are trained over an incredible amount of data.” — Zelda Mariet</p><p><br></p><p>“There are two things that are well-known about foundation models, they’re hungry in terms of data and they’re hungry in terms of compute.” — Zelda Mariet</p><p><br></p><p>“On the philosophical side, science is something that progresses as a community, and as much as we have, what I would say is a frankly amazing team at Bioptimus, we don’t have a monopoly on people who understand the problems we’re trying to solve. And having our model be accessible is one way to gain access into the broader community to get insight and to help people who want to use our models, get insight into maybe where we’re not doing as well that we need to improve.” — Zelda Mariet</p><p><br></p><p><strong>Links:</strong></p><p><a href="https://www.linkedin.com/in/negative-dependence-for-ml/">Zelda Mariet on LinkedIn</a></p><p><a href="https://zelda.lids.mit.edu/">Zelda Mariet</a></p><p><a href="https://www.bioptimus.com/">Bioptimus</a></p><p><br><strong>Resources for Computer Vision Teams:</strong></p><p><a href="https://www.linkedin.com/in/hdcouture/">LinkedIn</a> – Connect with Heather.</p><p><a href="https://pixelscientia.com/newsletter/">Computer Vision Insights Newsletter</a> – A biweekly newsletter to help bring the latest machine learning and computer vision research to applications in people and planetary health.</p><p><a href="http://pixelscientia.com/services/strategy-session/">Computer Vision Strategy Session</a> – Not sure how to advance your computer vision project? 
Get unstuck with a clear set of next steps. Schedule a 1 hour strategy session now to advance your project.</p>]]>
      </description>
      <content:encoded>
        <![CDATA[<p>Zelda Mariet, Co-Founder and Principal Research Scientist at Bioptimus, joins me to continue our series of conversations on the vast possibilities and diverse applications of foundation models. Today’s discussion focuses on how foundation models are transforming biology. Zelda shares insights into Bioptimus’ work and why it’s so critical in this field. She breaks down the three core components involved in building these models and explains what sets their histopathology model apart from the many others being published today. They also explore the methodology for properly benchmarking the quality and performance of foundation models, Bioptimus’ strategy for commercializing its technology, and much more. To learn more about Bioptimus, their plans beyond pathology, and the impact they hope to make in the next three to five years, tune in now.</p><p><br></p><p><strong>Key Points:</strong></p><ul><li>Who is Zelda Mariet and what led her to create Bioptimus. </li><li>What Bioptimus does and why it’s so important.</li><li>Why their first model announced was for pathology.</li><li>Zelda breaks down three core components that go into building a foundation model.</li><li>How their histopathology foundation model is different from the number of other models published at this point.</li><li>Their methodology behind properly benchmarking how well their foundation model performs.</li><li>Different challenges they’ve encountered on their foundation model journey.</li><li>How they plan to commercialize their technology at Bioptimus. </li><li>Thoughts on whether open source is part of their long-term strategy for the model, and why.  </li><li>Developing a product roadmap for a foundation model.</li><li>She shares some information regarding their next step, beyond pathology, at Bioptimus.</li><li>The importance of understanding what kind of structure you want to capture in your data.</li><li>Where she sees the impact of Bioptimus in the next three to five years. 
</li></ul><p><br></p><p><strong>Quotes:</strong></p><p>“Working on biological data became a little bit of a fascination of mine because I was so instinctively annoyed at how hard it was to do.” — Zelda Mariet</p><p><br></p><p>“Bioptimus is building foundation models for biology. Foundation models are essentially machine learning models that take an extremely long time to train [and] are trained over an incredible amount of data.” — Zelda Mariet</p><p><br></p><p>“There are two things that are well-known about foundation models, they’re hungry in terms of data and they’re hungry in terms of compute.” — Zelda Mariet</p><p><br></p><p>“On the philosophical side, science is something that progresses as a community, and as much as we have, what I would say is a frankly amazing team at Bioptimus, we don’t have a monopoly on people who understand the problems we’re trying to solve. And having our model be accessible is one way to gain access into the broader community to get insight and to help people who want to use our models, get insight into maybe where we’re not doing as well that we need to improve.” — Zelda Mariet</p><p><br></p><p><strong>Links:</strong></p><p><a href="https://www.linkedin.com/in/negative-dependence-for-ml/">Zelda Mariet on LinkedIn</a></p><p><a href="https://zelda.lids.mit.edu/">Zelda Mariet</a></p><p><a href="https://www.bioptimus.com/">Bioptimus</a></p><p><br><strong>Resources for Computer Vision Teams:</strong></p><p><a href="https://www.linkedin.com/in/hdcouture/">LinkedIn</a> – Connect with Heather.</p><p><a href="https://pixelscientia.com/newsletter/">Computer Vision Insights Newsletter</a> – A biweekly newsletter to help bring the latest machine learning and computer vision research to applications in people and planetary health.</p><p><a href="http://pixelscientia.com/services/strategy-session/">Computer Vision Strategy Session</a> – Not sure how to advance your computer vision project? 
Get unstuck with a clear set of next steps. Schedule a 1 hour strategy session now to advance your project.</p>]]>
      </content:encoded>
      <pubDate>Mon, 17 Feb 2025 06:00:00 -0500</pubDate>
      <author>Heather D. Couture</author>
      <enclosure url="https://2.gum.fm/op3.dev/e/pdcn.co/e/pdst.fm/e/dts.podtrac.com/redirect.mp3/media.transistor.fm/8e4953c8/6e767d7f.mp3" length="30916932" type="audio/mpeg"/>
      <itunes:author>Heather D. Couture</itunes:author>
      <itunes:image href="https://img.transistor.fm/kRcvRA5LsbhJwGkNOd8vpOxk9v0ynSzcgi_b_x03CWo/rs:fill:0:0:1/w:1400/h:1400/q:60/mb:500000/aHR0cHM6Ly9pbWct/dXBsb2FkLXByb2R1/Y3Rpb24udHJhbnNp/c3Rvci5mbS9kNjMz/ZWI0ZjJkNGMxZTVh/MWZlY2RmYzAxZTRj/YTY2Yy5wbmc.jpg"/>
      <itunes:duration>1286</itunes:duration>
      <itunes:summary>
        <![CDATA[<p>Zelda Mariet, Co-Founder and Principal Research Scientist at Bioptimus, joins me to continue our series of conversations on the vast possibilities and diverse applications of foundation models. Today’s discussion focuses on how foundation models are transforming biology. Zelda shares insights into Bioptimus’ work and why it’s so critical in this field. She breaks down the three core components involved in building these models and explains what sets their histopathology model apart from the many others being published today. They also explore the methodology for properly benchmarking the quality and performance of foundation models, Bioptimus’ strategy for commercializing its technology, and much more. To learn more about Bioptimus, their plans beyond pathology, and the impact they hope to make in the next three to five years, tune in now.</p><p><br></p><p><strong>Key Points:</strong></p><ul><li>Who is Zelda Mariet and what led her to create Bioptimus. </li><li>What Bioptimus does and why it’s so important.</li><li>Why their first model announced was for pathology.</li><li>Zelda breaks down three core components that go into building a foundation model.</li><li>How their histopathology foundation model is different from the number of other models published at this point.</li><li>Their methodology behind properly benchmarking how well their foundation model performs.</li><li>Different challenges they’ve encountered on their foundation model journey.</li><li>How they plan to commercialize their technology at Bioptimus. </li><li>Thoughts on whether open source is part of their long-term strategy for the model, and why.  </li><li>Developing a product roadmap for a foundation model.</li><li>She shares some information regarding their next step, beyond pathology, at Bioptimus.</li><li>The importance of understanding what kind of structure you want to capture in your data.</li><li>Where she sees the impact of Bioptimus in the next three to five years. 
</li></ul><p><br></p><p><strong>Quotes:</strong></p><p>“Working on biological data became a little bit of a fascination of mine because I was so instinctively annoyed at how hard it was to do.” — Zelda Mariet</p><p><br></p><p>“Bioptimus is building foundation models for biology. Foundation models are essentially machine learning models that take an extremely long time to train [and] are trained over an incredible amount of data.” — Zelda Mariet</p><p><br></p><p>“There are two things that are well-known about foundation models, they’re hungry in terms of data and they’re hungry in terms of compute.” — Zelda Mariet</p><p><br></p><p>“On the philosophical side, science is something that progresses as a community, and as much as we have, what I would say is a frankly amazing team at Bioptimus, we don’t have a monopoly on people who understand the problems we’re trying to solve. And having our model be accessible is one way to gain access into the broader community to get insight and to help people who want to use our models, get insight into maybe where we’re not doing as well that we need to improve.” — Zelda Mariet</p><p><br></p><p><strong>Links:</strong></p><p><a href="https://www.linkedin.com/in/negative-dependence-for-ml/">Zelda Mariet on LinkedIn</a></p><p><a href="https://zelda.lids.mit.edu/">Zelda Mariet</a></p><p><a href="https://www.bioptimus.com/">Bioptimus</a></p><p><br><strong>Resources for Computer Vision Teams:</strong></p><p><a href="https://www.linkedin.com/in/hdcouture/">LinkedIn</a> – Connect with Heather.</p><p><a href="https://pixelscientia.com/newsletter/">Computer Vision Insights Newsletter</a> – A biweekly newsletter to help bring the latest machine learning and computer vision research to applications in people and planetary health.</p><p><a href="http://pixelscientia.com/services/strategy-session/">Computer Vision Strategy Session</a> – Not sure how to advance your computer vision project? 
Get unstuck with a clear set of next steps. Schedule a 1 hour strategy session now to advance your project.</p>]]>
      </itunes:summary>
      <itunes:keywords>machine learning, biology foundation models</itunes:keywords>
      <itunes:explicit>No</itunes:explicit>
      <podcast:transcript url="https://share.transistor.fm/s/8e4953c8/transcript.txt" type="text/plain"/>
    </item>
    <item>
      <title>Foundation Model Series: Democratizing Time Series Data Analysis with Max Mergenthaler Canseco from Nixtla</title>
      <itunes:episode>116</itunes:episode>
      <podcast:episode>116</podcast:episode>
      <itunes:title>Foundation Model Series: Democratizing Time Series Data Analysis with Max Mergenthaler Canseco from Nixtla</itunes:title>
      <itunes:episodeType>full</itunes:episodeType>
      <guid isPermaLink="false">f3f467a7-f859-4e5d-905e-85981ba1a4c4</guid>
      <link>https://pixelscientia.com/podcast/democratizing-time-series-data-analysis-with-max-mergenthaler-canseco-from-nixtla/</link>
      <description>
        <![CDATA[<p>What if the hidden patterns of time series data could be unlocked to predict the future with remarkable accuracy? In this episode of Impact AI, I sit down with Max Mergenthaler Canseco to discuss democratizing time series data analysis through the development of foundation models. Max is the CEO and co-founder of Nixtla, a company specializing in time series research and deployment, aiming to democratize access to advanced predictive insights across various industries.</p><p>In our conversation, we explore the significance of time series data in real-world applications, the evolution of time series forecasting, and the shift away from traditional econometric models to the development of TimeGPT. Learn about the challenges faced in building foundation models for time series and a time series model’s practical applications across industries. Discover the future of time series models, the integration of multimodal data, scaling challenges, and the potential for greater adoption in both small businesses and large enterprises. 
Max also shares Nixtla’s vision for becoming the go-to solution for time series analysis and offers advice to leaders of AI-powered startups.</p><p><br></p><p><strong>Key Points:</strong></p><ul><li>Max's background in philosophy, his transition to machine learning, and his path to Nixtla.</li><li>Why time series data is the “DNA of the world” and its role in businesses and institutions.</li><li>Nixtla's advanced forecasting algorithms, the benefits, and their application to industry.</li><li>Historical overview of time series forecasting and the development of modern approaches.</li><li>Learn about the advantages of foundation models for scalability, speed, and ease of use.</li><li>Uncover the range of datasets used to train Nixtla's foundation models and their sources.</li><li>Similarities and differences between training TimeGPT and large language models (LLMs).</li><li>Hear about the main challenges of building time series foundation models for forecasting. </li><li>How Nixtla ensures the quality of its models and the limitations of conventional benchmarks.</li><li>Explore the gap between benchmark performance and effectiveness in the real world.</li><li>He shares the current and upcoming plans for Nixtla and its TimeGPT foundation model. </li><li>He shares his predictions for the future of time series foundation models.</li><li>Advice for leaders of AI-powered startups and what impact he aims to make with Nixtla.</li></ul><p><br></p><p><strong>Quotes:</strong></p><p>“Time series are in one aspect, the DNA of the world.” — Max Mergenthaler Canseco</p><p><br></p><p>“Time is an essential component to understand a change of course, but also to understand our reality. 
So, time series is maybe a somewhat technical term for a very familiar aspect of our reality.” — Max Mergenthaler Canseco</p><p><br></p><p>“Given that we are all training on massive amounts of data and some of us are not disclosing which datasets we’re using, it’s always a problem for academics to try to benchmark foundation models because there might be leakage.” — Max Mergenthaler Canseco</p><p><br></p><p>“That’s an interesting aspect of foundation models in time series, that benchmarking is not as straightforward as one might think.” — Max Mergenthaler Canseco</p><p><br></p><p>“I think right now in our field probably benchmarks are not necessarily indicative of how well a model is going to perform in real-world data.” — Max Mergenthaler Canseco</p><p><br></p><p>“I think that we’re also going to see some of those intuitions that come from the LLM field translated into the time series field soon.” — Max Mergenthaler Canseco</p><p><br></p><p><strong>Links:</strong></p><p><a href="https://www.linkedin.com/in/mergenthaler/">Max Mergenthaler Canseco on LinkedIn</a></p><p><a href="https://www.nixtla.io/">Nixtla</a></p><p><a href="https://x.com/nixtlainc">Nixtla on X</a></p><p><a href="https://www.linkedin.com/company/nixtlainc/">Nixtla on LinkedIn</a></p><p><a href="https://github.com/nixtla">Nixtla on GitHub</a></p><p><br><strong>Resources for Computer Vision Teams:</strong></p><p><a href="https://www.linkedin.com/in/hdcouture/">LinkedIn</a> – Connect with Heather.</p><p><a href="https://pixelscientia.com/newsletter/">Computer Vision Insights Newsletter</a> – A biweekly newsletter to help bring the latest machine learning and computer vision research to applications in people and planetary health.</p><p><a href="http://pixelscientia.com/services/strategy-session/">Computer Vision Strategy Session</a> – Not sure how to advance your computer vision project? Get unstuck with a clear set of next steps. Schedule a 1 hour strategy session now to advance your project.</p>]]>
      </description>
      <content:encoded>
        <![CDATA[<p>What if the hidden patterns of time series data could be unlocked to predict the future with remarkable accuracy? In this episode of Impact AI, I sit down with Max Mergenthaler Canseco to discuss democratizing time series data analysis through the development of foundation models. Max is the CEO and co-founder of Nixtla, a company specializing in time series research and deployment, aiming to democratize access to advanced predictive insights across various industries.</p><p>In our conversation, we explore the significance of time series data in real-world applications, the evolution of time series forecasting, and the shift away from traditional econometric models to the development of TimeGPT. Learn about the challenges faced in building foundation models for time series and a time series model’s practical applications across industries. Discover the future of time series models, the integration of multimodal data, scaling challenges, and the potential for greater adoption in both small businesses and large enterprises. 
Max also shares Nixtla’s vision for becoming the go-to solution for time series analysis and offers advice to leaders of AI-powered startups.</p><p><br></p><p><strong>Key Points:</strong></p><ul><li>Max's background in philosophy, his transition to machine learning, and his path to Nixtla.</li><li>Why time series data is the “DNA of the world” and its role in businesses and institutions.</li><li>Nixtla's advanced forecasting algorithms, the benefits, and their application to industry.</li><li>Historical overview of time series forecasting and the development of modern approaches.</li><li>Learn about the advantages of foundation models for scalability, speed, and ease of use.</li><li>Uncover the range of datasets used to train Nixtla's foundation models and their sources.</li><li>Similarities and differences between training TimeGPT and large language models (LLMs).</li><li>Hear about the main challenges of building time series foundation models for forecasting. </li><li>How Nixtla ensures the quality of its models and the limitations of conventional benchmarks.</li><li>Explore the gap between benchmark performance and effectiveness in the real world.</li><li>He shares the current and upcoming plans for Nixtla and its TimeGPT foundation model. </li><li>He shares his predictions for the future of time series foundation models.</li><li>Advice for leaders of AI-powered startups and what impact he aims to make with Nixtla.</li></ul><p><br></p><p><strong>Quotes:</strong></p><p>“Time series are in one aspect, the DNA of the world.” — Max Mergenthaler Canseco</p><p><br></p><p>“Time is an essential component to understand a change of course, but also to understand our reality. 
So, time series is maybe a somewhat technical term for a very familiar aspect of our reality.” — Max Mergenthaler Canseco</p><p><br></p><p>“Given that we are all training on massive amounts of data and some of us are not disclosing which datasets we’re using, it’s always a problem for academics to try to benchmark foundation models because there might be leakage.” — Max Mergenthaler Canseco</p><p><br></p><p>“That’s an interesting aspect of foundation models in time series, that benchmarking is not as straightforward as one might think.” — Max Mergenthaler Canseco</p><p><br></p><p>“I think right now in our field probably benchmarks are not necessarily indicative of how well a model is going to perform in real-world data.” — Max Mergenthaler Canseco</p><p><br></p><p>“I think that we’re also going to see some of those intuitions that come from the LLM field translated into the time series field soon.” — Max Mergenthaler Canseco</p><p><br></p><p><strong>Links:</strong></p><p><a href="https://www.linkedin.com/in/mergenthaler/">Max Mergenthaler Canseco on LinkedIn</a></p><p><a href="https://www.nixtla.io/">Nixtla</a></p><p><a href="https://x.com/nixtlainc">Nixtla on X</a></p><p><a href="https://www.linkedin.com/company/nixtlainc/">Nixtla on LinkedIn</a></p><p><a href="https://github.com/nixtla">Nixtla on GitHub</a></p><p><br><strong>Resources for Computer Vision Teams:</strong></p><p><a href="https://www.linkedin.com/in/hdcouture/">LinkedIn</a> – Connect with Heather.</p><p><a href="https://pixelscientia.com/newsletter/">Computer Vision Insights Newsletter</a> – A biweekly newsletter to help bring the latest machine learning and computer vision research to applications in people and planetary health.</p><p><a href="http://pixelscientia.com/services/strategy-session/">Computer Vision Strategy Session</a> – Not sure how to advance your computer vision project? Get unstuck with a clear set of next steps. Schedule a 1 hour strategy session now to advance your project.</p>]]>
      </content:encoded>
      <pubDate>Mon, 10 Feb 2025 06:00:00 -0500</pubDate>
      <author>Heather D. Couture</author>
      <enclosure url="https://2.gum.fm/op3.dev/e/pdcn.co/e/pdst.fm/e/dts.podtrac.com/redirect.mp3/media.transistor.fm/ad3afc00/9a34c3ec.mp3" length="26280806" type="audio/mpeg"/>
      <itunes:author>Heather D. Couture</itunes:author>
      <itunes:image href="https://img.transistor.fm/fgoBAw1ylwr5ciPZyiBZ6xF1jBJtBOfBdCsn3DPNDeY/rs:fill:0:0:1/w:1400/h:1400/q:60/mb:500000/aHR0cHM6Ly9pbWct/dXBsb2FkLXByb2R1/Y3Rpb24udHJhbnNp/c3Rvci5mbS85MmQ4/ZmFhYzMzMDE1Njdk/MTFlNTQxM2NmYjIz/MTkyZC5qcGVn.jpg"/>
      <itunes:duration>1631</itunes:duration>
      <itunes:summary>
        <![CDATA[<p>What if the hidden patterns of time series data could be unlocked to predict the future with remarkable accuracy? In this episode of Impact AI, I sit down with Max Mergenthaler Canseco to discuss democratizing time series data analysis through the development of foundation models. Max is the CEO and co-founder of Nixtla, a company specializing in time series research and deployment, aiming to democratize access to advanced predictive insights across various industries.</p><p>In our conversation, we explore the significance of time series data in real-world applications, the evolution of time series forecasting, and the shift away from traditional econometric models to the development of TimeGPT. Learn about the challenges faced in building foundation models for time series and a time series model’s practical applications across industries. Discover the future of time series models, the integration of multimodal data, scaling challenges, and the potential for greater adoption in both small businesses and large enterprises. 
Max also shares Nixtla’s vision for becoming the go-to solution for time series analysis and offers advice to leaders of AI-powered startups.</p><p><br></p><p><strong>Key Points:</strong></p><ul><li>Max's background in philosophy, his transition to machine learning, and his path to Nixtla.</li><li>Why time series data is the “DNA of the world” and its role in businesses and institutions.</li><li>Nixtla's advanced forecasting algorithms, the benefits, and their application to industry.</li><li>Historical overview of time series forecasting and the development of modern approaches.</li><li>Learn about the advantages of foundation models for scalability, speed, and ease of use.</li><li>Uncover the range of datasets used to train Nixtla's foundation models and their sources.</li><li>Similarities and differences between training TimeGPT and large language models (LLMs).</li><li>Hear about the main challenges of building time series foundation models for forecasting. </li><li>How Nixtla ensures the quality of its models and the limitations of conventional benchmarks.</li><li>Explore the gap between benchmark performance and effectiveness in the real world.</li><li>He shares the current and upcoming plans for Nixtla and its TimeGPT foundation model. </li><li>He shares his predictions for the future of time series foundation models.</li><li>Advice for leaders of AI-powered startups and what impact he aims to make with Nixtla.</li></ul><p><br></p><p><strong>Quotes:</strong></p><p>“Time series are in one aspect, the DNA of the world.” — Max Mergenthaler Canseco</p><p><br></p><p>“Time is an essential component to understand a change of course, but also to understand our reality. 
So, time series is maybe a somewhat technical term for a very familiar aspect of our reality.” — Max Mergenthaler Canseco</p><p><br></p><p>“Given that we are all training on massive amounts of data and some of us are not disclosing which datasets we’re using, it’s always a problem for academics to try to benchmark foundation models because there might be leakage.” — Max Mergenthaler Canseco</p><p><br></p><p>“That’s an interesting aspect of foundation models in time series, that benchmarking is not as straightforward as one might think.” — Max Mergenthaler Canseco</p><p><br></p><p>“I think right now in our field probably benchmarks are not necessarily indicative of how well a model is going to perform in real-world data.” — Max Mergenthaler Canseco</p><p><br></p><p>“I think that we’re also going to see some of those intuitions that come from the LLM field translated into the time series field soon.” — Max Mergenthaler Canseco</p><p><br></p><p><strong>Links:</strong></p><p><a href="https://www.linkedin.com/in/mergenthaler/">Max Mergenthaler Canseco on LinkedIn</a></p><p><a href="https://www.nixtla.io/">Nixtla</a></p><p><a href="https://x.com/nixtlainc">Nixtla on X</a></p><p><a href="https://www.linkedin.com/company/nixtlainc/">Nixtla on LinkedIn</a></p><p><a href="https://github.com/nixtla">Nixtla on GitHub</a></p><p><br><strong>Resources for Computer Vision Teams:</strong></p><p><a href="https://www.linkedin.com/in/hdcouture/">LinkedIn</a> – Connect with Heather.</p><p><a href="https://pixelscientia.com/newsletter/">Computer Vision Insights Newsletter</a> – A biweekly newsletter to help bring the latest machine learning and computer vision research to applications in people and planetary health.</p><p><a href="http://pixelscientia.com/services/strategy-session/">Computer Vision Strategy Session</a> – Not sure how to advance your computer vision project? Get unstuck with a clear set of next steps. Schedule a 1 hour strategy session now to advance your project.</p>]]>
      </itunes:summary>
      <itunes:keywords>machine learning, time series, foundation models</itunes:keywords>
      <itunes:explicit>No</itunes:explicit>
      <podcast:transcript url="https://share.transistor.fm/s/ad3afc00/transcript.txt" type="text/plain"/>
    </item>
    <item>
      <title>Foundation Model Series: Harnessing Multimodal Data to Advance Immunotherapies with Ron Alfa from Noetik</title>
      <itunes:episode>115</itunes:episode>
      <podcast:episode>115</podcast:episode>
      <itunes:title>Foundation Model Series: Harnessing Multimodal Data to Advance Immunotherapies with Ron Alfa from Noetik</itunes:title>
      <itunes:episodeType>full</itunes:episodeType>
      <guid isPermaLink="false">ea0739a6-b54e-479f-a412-1558eb7549c3</guid>
      <link>https://pixelscientia.com/podcast/harnessing-multimodal-data-to-advance-immunotherapies-with-ron-alfa-from-noetik/</link>
      <description>
        <![CDATA[<p>In this episode, I'm joined by Ron Alfa, Co-Founder and CEO of Noetik, to discuss the groundbreaking role of foundation models in advancing cancer immunotherapy. Together, we explore why these models are essential to his work, what it takes to build a model that understands biology, and how Noetik is creating and sourcing their datasets. Ron also shares insights on scaling and training these models, the challenges his team has faced, and how effective analysis helps determine a model’s quality. To learn more about Noetik’s innovative achievements, Ron’s advice for leaders in AI-powered startups, and much more, be sure to tune in!</p><p><strong>Key Points:</strong></p><ul><li>Ron shares his background and how his journey led to Noetik.</li><li>Why a foundation model is important in their work.</li><li>What goes into building a foundation model that understands biology.</li><li>Building the dataset: where does the data come from?</li><li>The types of data they generate from the samples they use in their models.</li><li>He further explains the components necessary to build a foundation model.</li><li>The scale and what it takes to train these models. </li><li>Ron sheds light on the challenges they’ve encountered in building their foundation model.</li><li>How to determine if your foundation model is good. </li><li>Utilizing analysis to help identify ways to improve your model. 
</li><li>The current purpose for their foundation model and how they plan to use it in the future.</li><li>Key insights gained from developing foundation models and how these can be adapted to other types of data.</li><li>His advice to other leaders of AI-powered startups.</li><li>Ron digs deeper into their goal to impact patient care by developing new therapeutics.</li></ul><p><br></p><p><strong>Quotes:</strong></p><p>“Our thesis for Noetik is that one of the biggest problems we can impact if we want to make and bring new drugs to patients is predicting clinical success; so-called translation — that's where we focus Noetik, how can we train foundation models of biology so that we can better translate therapeutics from early discovery and preclinical models to patients.” — Ron Alfa</p><p><br></p><p>“We think the most important thing for any application of machine learning is the data.” — Ron Alfa</p><p><br></p><p>“The goal here is to train models that can do what humans cannot do, that can understand biology that we haven't discovered yet.” — Ron Alfa</p><p><br></p><p>“The big aim of Noetik is to develop these [foundational] models for therapeutics discovery.” — Ron Alfa</p><p><br></p><p><strong>Links:</strong></p><p><a href="https://www.linkedin.com/in/ron-alfa/">Ron Alfa on LinkedIn</a></p><p><a href="https://x.com/ron_alfa">Ron Alfa on X</a></p><p><a href="https://www.noetik.ai/">Noetik</a></p><p><a href="https://www.noetik.ai/octo-vc">Noetik Octo Virtual Cell (OCTO)</a></p><p><br><strong>Resources for Computer Vision Teams:</strong></p><p><a href="https://www.linkedin.com/in/hdcouture/">LinkedIn</a> – Connect with Heather.</p><p><a href="https://pixelscientia.com/newsletter/">Computer Vision Insights Newsletter</a> – A biweekly newsletter to help bring the latest machine learning and computer vision research to applications in people and planetary health.</p><p><a href="http://pixelscientia.com/services/strategy-session/">Computer Vision Strategy Session</a> – 
Not sure how to advance your computer vision project? Get unstuck with a clear set of next steps. Schedule a 1 hour strategy session now to advance your project.</p>]]>
      </description>
      <content:encoded>
        <![CDATA[<p>In this episode, I'm joined by Ron Alfa, Co-Founder and CEO of Noetik, to discuss the groundbreaking role of foundation models in advancing cancer immunotherapy. Together, we explore why these models are essential to his work, what it takes to build a model that understands biology, and how Noetik is creating and sourcing their datasets. Ron also shares insights on scaling and training these models, the challenges his team has faced, and how effective analysis helps determine a model’s quality. To learn more about Noetik’s innovative achievements, Ron’s advice for leaders in AI-powered startups, and much more, be sure to tune in!</p><p><strong>Key Points:</strong></p><ul><li>Ron shares his background and how his journey led to Noetik.</li><li>Why a foundation model is important in their work.</li><li>What goes into building a foundation model that understands biology.</li><li>Building the dataset: where does the data come from?</li><li>The types of data they generate from the samples they use in their models.</li><li>He further explains the components necessary to build a foundation model.</li><li>The scale and what it takes to train these models. </li><li>Ron sheds light on the challenges they’ve encountered in building their foundation model.</li><li>How to determine if your foundation model is good. </li><li>Utilizing analysis to help identify ways to improve your model. 
</li><li>The current purpose for their foundation model and how they plan to use it in the future.</li><li>Key insights gained from developing foundation models and how these can be adapted to other types of data.</li><li>His advice to other leaders of AI-powered startups.</li><li>Ron digs deeper into their goal to impact patient care by developing new therapeutics.</li></ul><p><br></p><p><strong>Quotes:</strong></p><p>“Our thesis for Noetik is that one of the biggest problems we can impact if we want to make and bring new drugs to patients is predicting clinical success; so-called translation — that's where we focus Noetik, how can we train foundation models of biology so that we can better translate therapeutics from early discovery and preclinical models to patients.” — Ron Alfa</p><p><br></p><p>“We think the most important thing for any application of machine learning is the data.” — Ron Alfa</p><p><br></p><p>“The goal here is to train models that can do what humans cannot do, that can understand biology that we haven't discovered yet.” — Ron Alfa</p><p><br></p><p>“The big aim of Noetik is to develop these [foundational] models for therapeutics discovery.” — Ron Alfa</p><p><br></p><p><strong>Links:</strong></p><p><a href="https://www.linkedin.com/in/ron-alfa/">Ron Alfa on LinkedIn</a></p><p><a href="https://x.com/ron_alfa">Ron Alfa on X</a></p><p><a href="https://www.noetik.ai/">Noetik</a></p><p><a href="https://www.noetik.ai/octo-vc">Noetik Octo Virtual Cell (OCTO)</a></p><p><br><strong>Resources for Computer Vision Teams:</strong></p><p><a href="https://www.linkedin.com/in/hdcouture/">LinkedIn</a> – Connect with Heather.</p><p><a href="https://pixelscientia.com/newsletter/">Computer Vision Insights Newsletter</a> – A biweekly newsletter to help bring the latest machine learning and computer vision research to applications in people and planetary health.</p><p><a href="http://pixelscientia.com/services/strategy-session/">Computer Vision Strategy Session</a> – 
Not sure how to advance your computer vision project? Get unstuck with a clear set of next steps. Schedule a 1 hour strategy session now to advance your project.</p>]]>
      </content:encoded>
      <pubDate>Mon, 03 Feb 2025 06:00:00 -0500</pubDate>
      <author>Heather D. Couture</author>
      <enclosure url="https://2.gum.fm/op3.dev/e/pdcn.co/e/pdst.fm/e/dts.podtrac.com/redirect.mp3/media.transistor.fm/5d0d34cb/df28302d.mp3" length="48890203" type="audio/mpeg"/>
      <itunes:author>Heather D. Couture</itunes:author>
      <itunes:image href="https://img.transistor.fm/CVIpfxW-eUQyrUVW194mIC4c2MrkyFfGMGnywflk-98/rs:fill:0:0:1/w:1400/h:1400/q:60/mb:500000/aHR0cHM6Ly9pbWct/dXBsb2FkLXByb2R1/Y3Rpb24udHJhbnNp/c3Rvci5mbS8zZDFh/NmU4MGNiNTNkNzUy/OGYxMWUwZGQyOGZi/OWI4MC5wbmc.jpg"/>
      <itunes:duration>2033</itunes:duration>
      <itunes:summary>
        <![CDATA[<p>In this episode, I'm joined by Ron Alfa, Co-Founder and CEO of Noetik, to discuss the groundbreaking role of foundation models in advancing cancer immunotherapy. Together, we explore why these models are essential to his work, what it takes to build a model that understands biology, and how Noetik is creating and sourcing their datasets. Ron also shares insights on scaling and training these models, the challenges his team has faced, and how effective analysis helps determine a model’s quality. To learn more about Noetik’s innovative achievements, Ron’s advice for leaders in AI-powered startups, and much more, be sure to tune in!</p><p><strong>Key Points:</strong></p><ul><li>Ron shares his background and how his journey led to Noetik.</li><li>Why a foundation model is important in their work.</li><li>What goes into building a foundation model that understands biology.</li><li>Building the dataset: where does the data come from?</li><li>The types of data they generate from the samples they use in their models.</li><li>He further explains the components necessary to build a foundation model.</li><li>The scale and what it takes to train these models. </li><li>Ron sheds light on the challenges they’ve encountered in building their foundation model.</li><li>How to determine if your foundation model is good. </li><li>Utilizing analysis to help identify ways to improve your model. 
</li><li>The current purpose for their foundation model and how they plan to use it in the future.</li><li>Key insights gained from developing foundation models and how these can be adapted to other types of data.</li><li>His advice to other leaders of AI-powered startups.</li><li>Ron digs deeper into their goal to impact patient care by developing new therapeutics.</li></ul><p><br></p><p><strong>Quotes:</strong></p><p>“Our thesis for Noetik is that one of the biggest problems we can impact if we want to make and bring new drugs to patients is predicting clinical success; so-called translation — that's where we focus Noetik, how can we train foundation models of biology so that we can better translate therapeutics from early discovery and preclinical models to patients.” — Ron Alfa</p><p><br></p><p>“We think the most important thing for any application of machine learning is the data.” — Ron Alfa</p><p><br></p><p>“The goal here is to train models that can do what humans cannot do, that can understand biology that we haven't discovered yet.” — Ron Alfa</p><p><br></p><p>“The big aim of Noetik is to develop these [foundational] models for therapeutics discovery.” — Ron Alfa</p><p><br></p><p><strong>Links:</strong></p><p><a href="https://www.linkedin.com/in/ron-alfa/">Ron Alfa on LinkedIn</a></p><p><a href="https://x.com/ron_alfa">Ron Alfa on X</a></p><p><a href="https://www.noetik.ai/">Noetik</a></p><p><a href="https://www.noetik.ai/octo-vc">Noetik Octo Virtual Cell (OCTO)</a></p><p><br><strong>Resources for Computer Vision Teams:</strong></p><p><a href="https://www.linkedin.com/in/hdcouture/">LinkedIn</a> – Connect with Heather.</p><p><a href="https://pixelscientia.com/newsletter/">Computer Vision Insights Newsletter</a> – A biweekly newsletter to help bring the latest machine learning and computer vision research to applications in people and planetary health.</p><p><a href="http://pixelscientia.com/services/strategy-session/">Computer Vision Strategy Session</a> – 
Not sure how to advance your computer vision project? Get unstuck with a clear set of next steps. Schedule a 1 hour strategy session now to advance your project.</p>]]>
      </itunes:summary>
      <itunes:keywords>machine learning, drug discovery, immunotherapy, foundation models</itunes:keywords>
      <itunes:explicit>No</itunes:explicit>
      <podcast:transcript url="https://share.transistor.fm/s/5d0d34cb/transcript.txt" type="text/plain"/>
    </item>
    <item>
      <title>Foundation Model Series: Accelerating Pathology Model Development Using Embeddings with Julianna Ianni from Proscia</title>
      <itunes:episode>114</itunes:episode>
      <podcast:episode>114</podcast:episode>
      <itunes:title>Foundation Model Series: Accelerating Pathology Model Development Using Embeddings with Julianna Ianni from Proscia</itunes:title>
      <itunes:episodeType>full</itunes:episodeType>
      <guid isPermaLink="false">4cc7d5ce-df43-4c0e-b228-87da3c211387</guid>
      <link>https://pixelscientia.com/podcast/accelerating-pathology-model-development-using-embeddings-with-julianna-ianni-from-proscia/</link>
      <description>
        <![CDATA[<p>How can foundation models accelerate breakthroughs in precision medicine? In today’s episode of Impact AI, we explore this question with returning guest, Julianna Ianni, Vice President of AI Research and Development at Proscia, a company revolutionizing pathology through cutting-edge technology. Join us as we explore how their platform, Concentriq, and its new Embeddings feature are transforming AI model development, making pathology-driven insights faster and more accessible than ever before. You’ll also learn how Proscia is shaping the future of precision medicine and discover practical insights for leveraging AI to advance healthcare. Whether you're curious about pathology, AI, or innovations in precision medicine, this episode offers invaluable takeaways you won’t want to miss!</p><p><br></p><p><strong>Key Points:</strong></p><ul><li>An overview of Julianna’s biomedical engineering background and Proscia's mission.</li><li>Insight into Proscia’s Concentriq platform, aiding more than two million diagnoses annually.</li><li>Ways that Concentriq Embeddings streamlines AI development by eliminating data friction.</li><li>How Concentriq Embeddings make model creation 13x faster than traditional methods.</li><li>Why Proscia integrates external foundation models for versatility and superior performance.</li><li>Flexible and efficient: how Concentriq lets users test, swap, and select models with ease.</li><li>Types of solutions built using these embeddings, including rapid biomarker detection.</li><li>Tackling AI challenges like reducing overfitting and addressing bias in medical applications.</li><li>Lessons from pathology: simplifying complex workflows for faster AI adoption in other fields.</li><li>A look at the future of foundation models for pathology and Julianna’s advice for innovators.</li></ul><p><br></p><p><strong>Quotes:</strong></p><p>“With the rise of foundation models that are pathology-specific and more powerful than the models of 
yesterday, the ability to extract embeddings efficiently became even more important for us.” — Julianna Ianni</p><p><br></p><p>“The pathology world didn't need another hit movie. It needed a streaming service.” — Julianna Ianni</p><p><br></p><p>“[Continue] to innovate and [understand] what's out there. There's a lot of change in the [pathology] field right now – You're going to make plans and then you're going to need to remake those plans because things are changing so quickly.” — Julianna Ianni</p><p><br></p><p>“ChatGPT didn't pervade our culture because it's fantastic technology. It pervaded our culture because the fantastic technology was easy to use. Pathology should be that easy. Our aim is to drive it there.” — Julianna Ianni</p><p><br></p><p><strong>Links:</strong></p><p><a href="https://proscia.com/">Proscia</a></p><p><a href="https://www.linkedin.com/in/julianna-ianni/">Julianna Ianni on LinkedIn</a></p><p><a href="https://x.com/juliannalog">Julianna Ianni on X</a></p><p><a href="https://scholar.google.com/citations?user=6FqXUIkAAAAJ">Julianna Ianni on Google Scholar</a></p><p><a href="https://proscia.com/concentriq-embeddings/">Concentriq Embeddings</a><br><a href="https://go.proscia.com/case-study-embeddings">Concentriq Embeddings internal case study</a><br><a href="https://github.com/Proscia/proscia-ai-tools">Proscia AI Toolkit</a><br><a href="https://proscia.com/video-tutorial-rapid-ai-development-with-concentriq-embeddings-zero-shot-tumor-detection-example/">Zero-Shot Tumor Detection Example</a></p><p>Previous episode of Impact AI: <a href="https://pixelscientia.com/podcast/data-driven-pathology-with-coleman-stavish-and-julianna-ianni-from-proscia/">Data-Driven Pathology with Coleman Stavish and Julianna Ianni from Proscia</a></p><p><br><strong>Resources for Computer Vision Teams:</strong></p><p><a href="https://www.linkedin.com/in/hdcouture/">LinkedIn</a> – Connect with Heather.</p><p><a href="https://pixelscientia.com/newsletter/">Computer Vision 
Insights Newsletter</a> – A biweekly newsletter to help bring the latest machine learning and computer vision research to applications in people and planetary health.</p><p><a href="http://pixelscientia.com/services/strategy-session/">Computer Vision Strategy Session</a> – Not sure how to advance your computer vision project? Get unstuck with a clear set of next steps. Schedule a 1 hour strategy session now to advance your project.</p>]]>
      </description>
      <content:encoded>
        <![CDATA[<p>How can foundation models accelerate breakthroughs in precision medicine? In today’s episode of Impact AI, we explore this question with returning guest, Julianna Ianni, Vice President of AI Research and Development at Proscia, a company revolutionizing pathology through cutting-edge technology. Join us as we explore how their platform, Concentriq, and its new Embeddings feature are transforming AI model development, making pathology-driven insights faster and more accessible than ever before. You’ll also learn how Proscia is shaping the future of precision medicine and discover practical insights for leveraging AI to advance healthcare. Whether you're curious about pathology, AI, or innovations in precision medicine, this episode offers invaluable takeaways you won’t want to miss!</p><p><br></p><p><strong>Key Points:</strong></p><ul><li>An overview of Julianna’s biomedical engineering background and Proscia's mission.</li><li>Insight into Proscia’s Concentriq platform, aiding more than two million diagnoses annually.</li><li>Ways that Concentriq Embeddings streamlines AI development by eliminating data friction.</li><li>How Concentriq Embeddings make model creation 13x faster than traditional methods.</li><li>Why Proscia integrates external foundation models for versatility and superior performance.</li><li>Flexible and efficient: how Concentriq lets users test, swap, and select models with ease.</li><li>Types of solutions built using these embeddings, including rapid biomarker detection.</li><li>Tackling AI challenges like reducing overfitting and addressing bias in medical applications.</li><li>Lessons from pathology: simplifying complex workflows for faster AI adoption in other fields.</li><li>A look at the future of foundation models for pathology and Julianna’s advice for innovators.</li></ul><p><br></p><p><strong>Quotes:</strong></p><p>“With the rise of foundation models that are pathology-specific and more powerful than the models of 
yesterday, the ability to extract embeddings efficiently became even more important for us.” — Julianna Ianni</p><p><br></p><p>“The pathology world didn't need another hit movie. It needed a streaming service.” — Julianna Ianni</p><p><br></p><p>“[Continue] to innovate and [understand] what's out there. There's a lot of change in the [pathology] field right now – You're going to make plans and then you're going to need to remake those plans because things are changing so quickly.” — Julianna Ianni</p><p><br></p><p>“ChatGPT didn't pervade our culture because it's fantastic technology. It pervaded our culture because the fantastic technology was easy to use. Pathology should be that easy. Our aim is to drive it there.” — Julianna Ianni</p><p><br></p><p><strong>Links:</strong></p><p><a href="https://proscia.com/">Proscia</a></p><p><a href="https://www.linkedin.com/in/julianna-ianni/">Julianna Ianni on LinkedIn</a></p><p><a href="https://x.com/juliannalog">Julianna Ianni on X</a></p><p><a href="https://scholar.google.com/citations?user=6FqXUIkAAAAJ">Julianna Ianni on Google Scholar</a></p><p><a href="https://proscia.com/concentriq-embeddings/">Concentriq Embeddings</a><br><a href="https://go.proscia.com/case-study-embeddings">Concentriq Embeddings internal case study</a><br><a href="https://github.com/Proscia/proscia-ai-tools">Proscia AI Toolkit</a><br><a href="https://proscia.com/video-tutorial-rapid-ai-development-with-concentriq-embeddings-zero-shot-tumor-detection-example/">Zero-Shot Tumor Detection Example</a></p><p>Previous episode of Impact AI: <a href="https://pixelscientia.com/podcast/data-driven-pathology-with-coleman-stavish-and-julianna-ianni-from-proscia/">Data-Driven Pathology with Coleman Stavish and Julianna Ianni from Proscia</a></p><p><br><strong>Resources for Computer Vision Teams:</strong></p><p><a href="https://www.linkedin.com/in/hdcouture/">LinkedIn</a> – Connect with Heather.</p><p><a href="https://pixelscientia.com/newsletter/">Computer Vision 
Insights Newsletter</a> – A biweekly newsletter to help bring the latest machine learning and computer vision research to applications in people and planetary health.</p><p><a href="http://pixelscientia.com/services/strategy-session/">Computer Vision Strategy Session</a> – Not sure how to advance your computer vision project? Get unstuck with a clear set of next steps. Schedule a 1 hour strategy session now to advance your project.</p>]]>
      </content:encoded>
      <pubDate>Mon, 27 Jan 2025 06:00:00 -0500</pubDate>
      <author>heather@pixelscientia.com (Heather D. Couture)</author>
      <enclosure url="https://2.gum.fm/op3.dev/e/pdcn.co/e/pdst.fm/e/dts.podtrac.com/redirect.mp3/media.transistor.fm/175df748/4ba4a13e.mp3" length="30193301" type="audio/mpeg"/>
      <itunes:author>Heather D. Couture</itunes:author>
      <itunes:image href="https://img.transistor.fm/UaK_MREOUOQqdus01VYh0sqS01GN2cpM0Xdz4CoKMIM/rs:fill:0:0:1/w:1400/h:1400/q:60/mb:500000/aHR0cHM6Ly9pbWct/dXBsb2FkLXByb2R1/Y3Rpb24udHJhbnNp/c3Rvci5mbS8xN2Vl/YTI5MjA5ZTQ4MmQy/MDgxNWVjMzQ5MDU2/YjhhNC5qcGVn.jpg"/>
      <itunes:duration>1251</itunes:duration>
      <itunes:summary>
        <![CDATA[<p>How can foundation models accelerate breakthroughs in precision medicine? In today’s episode of Impact AI, we explore this question with returning guest, Julianna Ianni, Vice President of AI Research and Development at Proscia, a company revolutionizing pathology through cutting-edge technology. Join us as we explore how their platform, Concentriq, and its new Embeddings feature are transforming AI model development, making pathology-driven insights faster and more accessible than ever before. You’ll also learn how Proscia is shaping the future of precision medicine and discover practical insights for leveraging AI to advance healthcare. Whether you're curious about pathology, AI, or innovations in precision medicine, this episode offers invaluable takeaways you won’t want to miss!</p><p><br></p><p><strong>Key Points:</strong></p><ul><li>An overview of Julianna’s biomedical engineering background and Proscia's mission.</li><li>Insight into Proscia’s Concentriq platform, aiding more than two million diagnoses annually.</li><li>Ways that Concentriq Embeddings streamlines AI development by eliminating data friction.</li><li>How Concentriq Embeddings make model creation 13x faster than traditional methods.</li><li>Why Proscia integrates external foundation models for versatility and superior performance.</li><li>Flexible and efficient: how Concentriq lets users test, swap, and select models with ease.</li><li>Types of solutions built using these embeddings, including rapid biomarker detection.</li><li>Tackling AI challenges like reducing overfitting and addressing bias in medical applications.</li><li>Lessons from pathology: simplifying complex workflows for faster AI adoption in other fields.</li><li>A look at the future of foundation models for pathology and Julianna’s advice for innovators.</li></ul><p><br></p><p><strong>Quotes:</strong></p><p>“With the rise of foundation models that are pathology-specific and more powerful than the models of 
yesterday, the ability to extract embeddings efficiently became even more important for us.” — Julianna Ianni</p><p><br></p><p>“The pathology world didn't need another hit movie. It needed a streaming service.” — Julianna Ianni</p><p><br></p><p>“[Continue] to innovate and [understand] what's out there. There's a lot of change in the [pathology] field right now – You're going to make plans and then you're going to need to remake those plans because things are changing so quickly.” — Julianna Ianni</p><p><br></p><p>“ChatGPT didn't pervade our culture because it's fantastic technology. It pervaded our culture because the fantastic technology was easy to use. Pathology should be that easy. Our aim is to drive it there.” — Julianna Ianni</p><p><br></p><p><strong>Links:</strong></p><p><a href="https://proscia.com/">Proscia</a></p><p><a href="https://www.linkedin.com/in/julianna-ianni/">Julianna Ianni on LinkedIn</a></p><p><a href="https://x.com/juliannalog">Julianna Ianni on X</a></p><p><a href="https://scholar.google.com/citations?user=6FqXUIkAAAAJ">Julianna Ianni on Google Scholar</a></p><p><a href="https://proscia.com/concentriq-embeddings/">Concentriq Embeddings</a><br><a href="https://go.proscia.com/case-study-embeddings">Concentriq Embeddings internal case study</a><br><a href="https://github.com/Proscia/proscia-ai-tools">Proscia AI Toolkit</a><br><a href="https://proscia.com/video-tutorial-rapid-ai-development-with-concentriq-embeddings-zero-shot-tumor-detection-example/">Zero-Shot Tumor Detection Example</a></p><p>Previous episode of Impact AI: <a href="https://pixelscientia.com/podcast/data-driven-pathology-with-coleman-stavish-and-julianna-ianni-from-proscia/">Data-Driven Pathology with Coleman Stavish and Julianna Ianni from Proscia</a></p><p><br><strong>Resources for Computer Vision Teams:</strong></p><p><a href="https://www.linkedin.com/in/hdcouture/">LinkedIn</a> – Connect with Heather.</p><p><a href="https://pixelscientia.com/newsletter/">Computer Vision 
Insights Newsletter</a> – A biweekly newsletter to help bring the latest machine learning and computer vision research to applications in people and planetary health.</p><p><a href="http://pixelscientia.com/services/strategy-session/">Computer Vision Strategy Session</a> – Not sure how to advance your computer vision project? Get unstuck with a clear set of next steps. Schedule a 1 hour strategy session now to advance your project.</p>]]>
      </itunes:summary>
      <itunes:keywords>machine learning, pathology, foundation models, medical imaging</itunes:keywords>
      <itunes:explicit>No</itunes:explicit>
      <podcast:transcript url="https://share.transistor.fm/s/175df748/transcript.txt" type="text/plain"/>
    </item>
    <item>
      <title>Actionable Soil Insights with Benjamin De Leener from ChrysaLabs</title>
      <itunes:episode>113</itunes:episode>
      <podcast:episode>113</podcast:episode>
      <itunes:title>Actionable Soil Insights with Benjamin De Leener from ChrysaLabs</itunes:title>
      <itunes:episodeType>full</itunes:episodeType>
      <guid isPermaLink="false">67a53190-c391-4c38-8dda-d9c0218df79f</guid>
      <link>https://pixelscientia.com/podcast/actionable-soil-insights-with-benjamin-de-leener-from-chrysalabs/</link>
      <description>
        <![CDATA[<p>With farmers sometimes waiting weeks for lab results to make critical decisions, Benjamin De Leener, Co-Founder and Chief Science Officer of ChrysaLabs, sought to transform the future of soil health. ChrysaLabs has developed a groundbreaking handheld, AI-powered probe that delivers fast field-ready insights into soil properties like pH, nutrients, and organic matter.</p><p>In this episode of Impact AI, Benjamin dives into the journey of creating this innovative tool, the challenges of working with complex agricultural data, and the role of machine learning in empowering farmers to make sustainable, data-driven decisions. Tune in to discover how this technology is not only boosting farming efficiency but also contributing to a healthier ecosystem and the fight against climate change!</p><p><br></p><p><strong>Key Points:</strong></p><ul><li>Benjamin’s biomedical engineering background and how it led him to start ChrysaLabs.</li><li>How ChrysaLabs’ portable probe provides real-time soil analysis.</li><li>The role of machine learning in converting spectroscopy data into actionable soil insights.</li><li>Challenges in acquiring diverse, high-quality soil data for model training.</li><li>Addressing variability in soil and lab measurements to ensure model accuracy.</li><li>What goes into ChrysaLabs’ validation techniques to maintain robust, reliable AI models.</li><li>Considerations for overcoming seasonal constraints in agricultural data collection.</li><li>Technological advancements that have enabled portable, cost-effective sensors.</li><li>Advice for AI-powered startups: balance data volume with variability management.</li><li>Collaborative efforts between agronomists and machine learning engineers at ChrysaLabs.</li><li>ChrysaLabs’ vision for improving soil health and combating climate change.</li></ul><p><br></p><p><strong>Quotes:</strong></p><p>“There’s a translation between the light information that we receive from the spectrometer and the 
information that is actionable for the farmers and agronomists. The machine learning models are between the hardware, the application, and what the farmers can do.” — Benjamin De Leener</p><p><br></p><p>“The main challenge that the agronomists and the farmers have is the data about what’s in the soil. So, that’s what we provide.” — Benjamin De Leener</p><p><br></p><p>“The more data you accumulate, the bigger the variability that you need to take into account. It’s not always better to think, ‘The more data I have, the better’ because sometimes, the less data, the more focused the models are.” — Benjamin De Leener</p><p><br></p><p>“We want to combat climate change – [We believe] that the soil can sequester a lot of carbon through agriculture, and we want to provide a way to measure that so that, when we choose one agronomical practice over another, we understand what we’re doing.” — Benjamin De Leener</p><p><br></p><p><strong>Links:</strong></p><p><a href="https://www.chrysalabs.com/">ChrysaLabs</a></p><p><a href="https://www.chrysalabs.com/insightlabs/">ChrysaLabs InsightLabs</a></p><p><a href="https://www.linkedin.com/in/benjamindeleener/">Benjamin De Leener on LinkedIn</a></p><p><a href="https://scholar.google.ca/citations?user=hXOe8XkAAAAJ&amp;hl=en">Benjamin De Leener on Google Scholar</a></p><p><a href="https://x.com/BenDeLeener">Benjamin De Leener on X</a></p><p><br><strong>Resources for Computer Vision Teams:</strong></p><p><a href="https://www.linkedin.com/in/hdcouture/">LinkedIn</a> – Connect with Heather.</p><p><a href="https://pixelscientia.com/newsletter/">Computer Vision Insights Newsletter</a> – A biweekly newsletter to help bring the latest machine learning and computer vision research to applications in people and planetary health.</p><p><a href="http://pixelscientia.com/services/strategy-session/">Computer Vision Strategy Session</a> – Not sure how to advance your computer vision project? Get unstuck with a clear set of next steps. 
Schedule a 1 hour strategy session now to advance your project.</p>]]>
      </description>
      <content:encoded>
        <![CDATA[<p>With farmers sometimes waiting weeks for lab results to make critical decisions, Benjamin De Leener, Co-Founder and Chief Science Officer of ChrysaLabs, sought to transform the future of soil health. ChrysaLabs has developed a groundbreaking handheld, AI-powered probe that delivers fast field-ready insights into soil properties like pH, nutrients, and organic matter.</p><p>In this episode of Impact AI, Benjamin dives into the journey of creating this innovative tool, the challenges of working with complex agricultural data, and the role of machine learning in empowering farmers to make sustainable, data-driven decisions. Tune in to discover how this technology is not only boosting farming efficiency but also contributing to a healthier ecosystem and the fight against climate change!</p><p><br></p><p><strong>Key Points:</strong></p><ul><li>Benjamin’s biomedical engineering background and how it led him to start ChrysaLabs.</li><li>How ChrysaLabs’ portable probe provides real-time soil analysis.</li><li>The role of machine learning in converting spectroscopy data into actionable soil insights.</li><li>Challenges in acquiring diverse, high-quality soil data for model training.</li><li>Addressing variability in soil and lab measurements to ensure model accuracy.</li><li>What goes into ChrysaLabs’ validation techniques to maintain robust, reliable AI models.</li><li>Considerations for overcoming seasonal constraints in agricultural data collection.</li><li>Technological advancements that have enabled portable, cost-effective sensors.</li><li>Advice for AI-powered startups: balance data volume with variability management.</li><li>Collaborative efforts between agronomists and machine learning engineers at ChrysaLabs.</li><li>ChrysaLabs’ vision for improving soil health and combating climate change.</li></ul><p><br></p><p><strong>Quotes:</strong></p><p>“There’s a translation between the light information that we receive from the spectrometer and the 
information that is actionable for the farmers and agronomists. The machine learning models are between the hardware, the application, and what the farmers can do.” — Benjamin De Leener</p><p><br></p><p>“The main challenge that the agronomists and the farmers have is the data about what’s in the soil. So, that’s what we provide.” — Benjamin De Leener</p><p><br></p><p>“The more data you accumulate, the bigger the variability that you need to take into account. It’s not always better to think, ‘The more data I have, the better’ because sometimes, the less data, the more focused the models are.” — Benjamin De Leener</p><p><br></p><p>“We want to combat climate change – [We believe] that the soil can sequester a lot of carbon through agriculture, and we want to provide a way to measure that so that, when we choose one agronomical practice over another, we understand what we’re doing.” — Benjamin De Leener</p><p><br></p><p><strong>Links:</strong></p><p><a href="https://www.chrysalabs.com/">ChrysaLabs</a></p><p><a href="https://www.chrysalabs.com/insightlabs/">ChrysaLabs InsightLabs</a></p><p><a href="https://www.linkedin.com/in/benjamindeleener/">Benjamin De Leener on LinkedIn</a></p><p><a href="https://scholar.google.ca/citations?user=hXOe8XkAAAAJ&amp;hl=en">Benjamin De Leener on Google Scholar</a></p><p><a href="https://x.com/BenDeLeener">Benjamin De Leener on X</a></p><p><br><strong>Resources for Computer Vision Teams:</strong></p><p><a href="https://www.linkedin.com/in/hdcouture/">LinkedIn</a> – Connect with Heather.</p><p><a href="https://pixelscientia.com/newsletter/">Computer Vision Insights Newsletter</a> – A biweekly newsletter to help bring the latest machine learning and computer vision research to applications in people and planetary health.</p><p><a href="http://pixelscientia.com/services/strategy-session/">Computer Vision Strategy Session</a> – Not sure how to advance your computer vision project? Get unstuck with a clear set of next steps. 
Schedule a 1 hour strategy session now to advance your project.</p>]]>
      </content:encoded>
      <pubDate>Mon, 06 Jan 2025 06:00:00 -0500</pubDate>
      <author>heather@pixelscientia.com (Heather D. Couture)</author>
      <enclosure url="https://2.gum.fm/op3.dev/e/pdcn.co/e/pdst.fm/e/dts.podtrac.com/redirect.mp3/media.transistor.fm/bf521730/7c030dd2.mp3" length="19511660" type="audio/mpeg"/>
      <itunes:author>Heather D. Couture</itunes:author>
      <itunes:image href="https://img.transistor.fm/XGkdkjEa8d3WaOLT_s-Ra4uBRQdXwPopn1erirALtK8/rs:fill:0:0:1/w:1400/h:1400/q:60/mb:500000/aHR0cHM6Ly9pbWct/dXBsb2FkLXByb2R1/Y3Rpb24udHJhbnNp/c3Rvci5mbS9kYWU0/OWI0NmJkNDdkYzc3/MGU3NTAxZmUzZjkz/MWI1MS5wbmc.jpg"/>
      <itunes:duration>1210</itunes:duration>
      <itunes:summary>
        <![CDATA[<p>With farmers sometimes waiting weeks for lab results to make critical decisions, Benjamin De Leener, Co-Founder and Chief Science Officer of ChrysaLabs, sought to transform the future of soil health. ChrysaLabs has developed a groundbreaking handheld, AI-powered probe that delivers fast field-ready insights into soil properties like pH, nutrients, and organic matter.</p><p>In this episode of Impact AI, Benjamin dives into the journey of creating this innovative tool, the challenges of working with complex agricultural data, and the role of machine learning in empowering farmers to make sustainable, data-driven decisions. Tune in to discover how this technology is not only boosting farming efficiency but also contributing to a healthier ecosystem and the fight against climate change!</p><p><br></p><p><strong>Key Points:</strong></p><ul><li>Benjamin’s biomedical engineering background and how it led him to start ChrysaLabs.</li><li>How ChrysaLabs’ portable probe provides real-time soil analysis.</li><li>The role of machine learning in converting spectroscopy data into actionable soil insights.</li><li>Challenges in acquiring diverse, high-quality soil data for model training.</li><li>Addressing variability in soil and lab measurements to ensure model accuracy.</li><li>What goes into ChrysaLabs’ validation techniques to maintain robust, reliable AI models.</li><li>Considerations for overcoming seasonal constraints in agricultural data collection.</li><li>Technological advancements that have enabled portable, cost-effective sensors.</li><li>Advice for AI-powered startups: balance data volume with variability management.</li><li>Collaborative efforts between agronomists and machine learning engineers at ChrysaLabs.</li><li>ChrysaLabs’ vision for improving soil health and combating climate change.</li></ul><p><br></p><p><strong>Quotes:</strong></p><p>“There’s a translation between the light information that we receive from the spectrometer and the 
information that is actionable for the farmers and agronomists. The machine learning models are between the hardware, the application, and what the farmers can do.” — Benjamin De Leener</p><p><br></p><p>“The main challenge that the agronomists and the farmers have is the data about what’s in the soil. So, that’s what we provide.” — Benjamin De Leener</p><p><br></p><p>“The more data you accumulate, the bigger the variability that you need to take into account. It’s not always better to think, ‘The more data I have, the better’ because sometimes, the less data, the more focused the models are.” — Benjamin De Leener</p><p><br></p><p>“We want to combat climate change – [We believe] that the soil can sequester a lot of carbon through agriculture, and we want to provide a way to measure that so that, when we choose one agronomical practice over another, we understand what we’re doing.” — Benjamin De Leener</p><p><br></p><p><strong>Links:</strong></p><p><a href="https://www.chrysalabs.com/">ChrysaLabs</a></p><p><a href="https://www.chrysalabs.com/insightlabs/">ChrysaLabs InsightLabs</a></p><p><a href="https://www.linkedin.com/in/benjamindeleener/">Benjamin De Leener on LinkedIn</a></p><p><a href="https://scholar.google.ca/citations?user=hXOe8XkAAAAJ&amp;hl=en">Benjamin De Leener on Google Scholar</a></p><p><a href="https://x.com/BenDeLeener">Benjamin De Leener on X</a></p><p><br><strong>Resources for Computer Vision Teams:</strong></p><p><a href="https://www.linkedin.com/in/hdcouture/">LinkedIn</a> – Connect with Heather.</p><p><a href="https://pixelscientia.com/newsletter/">Computer Vision Insights Newsletter</a> – A biweekly newsletter to help bring the latest machine learning and computer vision research to applications in people and planetary health.</p><p><a href="http://pixelscientia.com/services/strategy-session/">Computer Vision Strategy Session</a> – Not sure how to advance your computer vision project? Get unstuck with a clear set of next steps. 
Schedule a 1 hour strategy session now to advance your project.</p>]]>
      </itunes:summary>
      <itunes:keywords>machine learning, agriculture, farming, soil, spectroscopy</itunes:keywords>
      <itunes:explicit>No</itunes:explicit>
      <podcast:transcript url="https://share.transistor.fm/s/bf521730/transcript.txt" type="text/plain"/>
    </item>
    <item>
      <title>Advancing Therapies for Immune Diseases with Kfir Schreiber from DeepCure</title>
      <itunes:episode>112</itunes:episode>
      <podcast:episode>112</podcast:episode>
      <itunes:title>Advancing Therapies for Immune Diseases with Kfir Schreiber from DeepCure</itunes:title>
      <itunes:episodeType>full</itunes:episodeType>
      <guid isPermaLink="false">d1c127fc-d78b-4a17-84d1-f1d827befd30</guid>
      <link>https://pixelscientia.com/podcast/advancing-therapies-for-immune-diseases-with-kfir-schreiber-from-deepcure/</link>
      <description>
        <![CDATA[<p>Can AI cure autoimmune diseases? This episode of Impact AI dives into the groundbreaking work of DeepCure, where artificial intelligence meets medicinal chemistry to tackle some of healthcare's most stubborn challenges. Co-founder and CEO Kfir Schreiber shares how his team uses advanced machine learning tools, physics simulations, and human expertise to design the next generation of small molecule drugs. From overcoming data limitations to fostering tight collaboration between machine learning scientists and chemists, this discussion illuminates the potential of AI-driven innovation in transforming patient outcomes. With a rheumatoid arthritis drug nearing clinical trials, DeepCure is poised to redefine the future of medicine. Tune in to discover how AI can accelerate drug discovery, overcome data challenges, and create life-changing therapies, as well as how these insights can inspire your own innovative pursuits!</p><p><br></p><p><strong>Key Points:</strong></p><ul><li>How Kfir's background in computer science and applied math led him to found DeepCure.</li><li>Insight into DeepCure’s mission to leverage proprietary technology to create small molecule drugs for inflammation and autoimmunity.</li><li>Augmenting human expertise with AI: the role of machine learning in drug discovery.</li><li>Layers of using AI to analyze targets and design small molecules with optimized properties.</li><li>Challenges in small molecule datasets and how DeepCure develops tailored models.</li><li>The influence of molecule representations like SMILES on machine learning models.</li><li>Combining publicly available datasets with data generated in DeepCure’s automation lab.</li><li>Model validation techniques to address out-of-distribution challenges in small molecule data.</li><li>Collaboration between machine learning experts and chemists to refine drug discovery.</li><li>Recruiting top talent by highlighting DeepCure’s impactful mission in healthcare.</li><li>The 
process of onboarding machine learning developers with no prior chemistry knowledge.</li><li>Problem-solving advice for leaders of AI-powered startups: it’s not about the AI!</li><li>DeepCure’s future plans for clinical trials and expansion into other autoimmune diseases.</li></ul><p><br></p><p><strong>Quotes:</strong></p><p>“Machine learning in our space is almost never a complete solution. It's a way to augment our chemists [and] our biologists [to] try to make them capable of solving problems that were unsolved before.” — Kfir Schreiber</p><p><br></p><p>“One of the best things about DeepCure [is the] very tight collaboration between the domain experts and our machine learning scientists.” — Kfir Schreiber</p><p><br></p><p>“Your average machine-learning scientist doesn't have chemistry intuition. We need this feedback and we need to integrate this feedback back into our models to make the predictions make sense.” — Kfir Schreiber</p><p><br></p><p>“Focus on the problem, focus on the value, and work your way backwards to the best tools to use.” — Kfir Schreiber</p><p><br></p><p><strong>Links:</strong></p><p><a href="https://deepcure.com/">DeepCure<br></a><a href="https://www.linkedin.com/in/kfir-schreiber/">Kfir Schreiber on LinkedIn</a></p><p><br><strong>Resources for Computer Vision Teams:</strong></p><p><a href="https://www.linkedin.com/in/hdcouture/">LinkedIn</a> – Connect with Heather.</p><p><a href="https://pixelscientia.com/newsletter/">Computer Vision Insights Newsletter</a> – A biweekly newsletter to help bring the latest machine learning and computer vision research to applications in people and planetary health.</p><p><a href="http://pixelscientia.com/services/strategy-session/">Computer Vision Strategy Session</a> – Not sure how to advance your computer vision project? Get unstuck with a clear set of next steps. Schedule a 1 hour strategy session now to advance your project.</p>]]>
      </description>
      <content:encoded>
        <![CDATA[<p>Can AI cure autoimmune diseases? This episode of Impact AI dives into the groundbreaking work of DeepCure, where artificial intelligence meets medicinal chemistry to tackle some of healthcare's most stubborn challenges. Co-founder and CEO Kfir Schreiber shares how his team uses advanced machine learning tools, physics simulations, and human expertise to design the next generation of small molecule drugs. From overcoming data limitations to fostering tight collaboration between machine learning scientists and chemists, this discussion illuminates the potential of AI-driven innovation in transforming patient outcomes. With a rheumatoid arthritis drug nearing clinical trials, DeepCure is poised to redefine the future of medicine. Tune in to discover how AI can accelerate drug discovery, overcome data challenges, and create life-changing therapies, as well as how these insights can inspire your own innovative pursuits!</p><p><br></p><p><strong>Key Points:</strong></p><ul><li>How Kfir's background in computer science and applied math led him to found DeepCure.</li><li>Insight into DeepCure’s mission to leverage proprietary technology to create small molecule drugs for inflammation and autoimmunity.</li><li>Augmenting human expertise with AI: the role of machine learning in drug discovery.</li><li>Layers of using AI to analyze targets and design small molecules with optimized properties.</li><li>Challenges in small molecule datasets and how DeepCure develops tailored models.</li><li>The influence of molecule representations like SMILES on machine learning models.</li><li>Combining publicly available datasets with data generated in DeepCure’s automation lab.</li><li>Model validation techniques to address out-of-distribution challenges in small molecule data.</li><li>Collaboration between machine learning experts and chemists to refine drug discovery.</li><li>Recruiting top talent by highlighting DeepCure’s impactful mission in healthcare.</li><li>The 
process of onboarding machine learning developers with no prior chemistry knowledge.</li><li>Problem-solving advice for leaders of AI-powered startups: it’s not about the AI!</li><li>DeepCure’s future plans for clinical trials and expansion into other autoimmune diseases.</li></ul><p><br></p><p><strong>Quotes:</strong></p><p>“Machine learning in our space is almost never a complete solution. It's a way to augment our chemists [and] our biologists [to] try to make them capable of solving problems that were unsolved before.” — Kfir Schreiber</p><p><br></p><p>“One of the best things about DeepCure [is the] very tight collaboration between the domain experts and our machine learning scientists.” — Kfir Schreiber</p><p><br></p><p>“Your average machine-learning scientist doesn't have chemistry intuition. We need this feedback and we need to integrate this feedback back into our models to make the predictions make sense.” — Kfir Schreiber</p><p><br></p><p>“Focus on the problem, focus on the value, and work your way backwards to the best tools to use.” — Kfir Schreiber</p><p><br></p><p><strong>Links:</strong></p><p><a href="https://deepcure.com/">DeepCure<br></a><a href="https://www.linkedin.com/in/kfir-schreiber/">Kfir Schreiber on LinkedIn</a></p><p><br><strong>Resources for Computer Vision Teams:</strong></p><p><a href="https://www.linkedin.com/in/hdcouture/">LinkedIn</a> – Connect with Heather.</p><p><a href="https://pixelscientia.com/newsletter/">Computer Vision Insights Newsletter</a> – A biweekly newsletter to help bring the latest machine learning and computer vision research to applications in people and planetary health.</p><p><a href="http://pixelscientia.com/services/strategy-session/">Computer Vision Strategy Session</a> – Not sure how to advance your computer vision project? Get unstuck with a clear set of next steps. Schedule a 1 hour strategy session now to advance your project.</p>]]>
      </content:encoded>
      <pubDate>Mon, 16 Dec 2024 06:00:00 -0500</pubDate>
      <author>heather@pixelscientia.com (Heather D. Couture)</author>
      <enclosure url="https://2.gum.fm/op3.dev/e/pdcn.co/e/pdst.fm/e/dts.podtrac.com/redirect.mp3/media.transistor.fm/eb495280/0e7177fa.mp3" length="29503238" type="audio/mpeg"/>
      <itunes:author>Heather D. Couture</itunes:author>
      <itunes:image href="https://img.transistor.fm/SOdZ5tJeLl9bRmdO2i3ic0TFIwUt1c8sdYZhSjfaC-g/rs:fill:0:0:1/w:1400/h:1400/q:60/mb:500000/aHR0cHM6Ly9pbWct/dXBsb2FkLXByb2R1/Y3Rpb24udHJhbnNp/c3Rvci5mbS83NTcz/MzA3YmE1YWMwNWZl/YTY0MGI3NzU1N2Qy/ZWNhZS5qcGVn.jpg"/>
      <itunes:duration>1228</itunes:duration>
      <itunes:summary>
        <![CDATA[<p>Can AI cure autoimmune diseases? This episode of Impact AI dives into the groundbreaking work of DeepCure, where artificial intelligence meets medicinal chemistry to tackle some of healthcare's most stubborn challenges. Co-founder and CEO Kfir Schreiber shares how his team uses advanced machine learning tools, physics simulations, and human expertise to design the next generation of small molecule drugs. From overcoming data limitations to fostering tight collaboration between machine learning scientists and chemists, this discussion illuminates the potential of AI-driven innovation in transforming patient outcomes. With a rheumatoid arthritis drug nearing clinical trials, DeepCure is poised to redefine the future of medicine. Tune in to discover how AI can accelerate drug discovery, overcome data challenges, and create life-changing therapies, as well as how these insights can inspire your own innovative pursuits!</p><p><br></p><p><strong>Key Points:</strong></p><ul><li>How Kfir's background in computer science and applied math led him to found DeepCure.</li><li>Insight into DeepCure’s mission to leverage proprietary technology to create small molecule drugs for inflammation and autoimmunity.</li><li>Augmenting human expertise with AI: the role of machine learning in drug discovery.</li><li>Layers of using AI to analyze targets and design small molecules with optimized properties.</li><li>Challenges in small molecule datasets and how DeepCure develops tailored models.</li><li>The influence of molecule representations like SMILES on machine learning models.</li><li>Combining publicly available datasets with data generated in DeepCure’s automation lab.</li><li>Model validation techniques to address out-of-distribution challenges in small molecule data.</li><li>Collaboration between machine learning experts and chemists to refine drug discovery.</li><li>Recruiting top talent by highlighting DeepCure’s impactful mission in healthcare.</li><li>The 
process of onboarding machine learning developers with no prior chemistry knowledge.</li><li>Problem-solving advice for leaders of AI-powered startups: it’s not about the AI!</li><li>DeepCure’s future plans for clinical trials and expansion into other autoimmune diseases.</li></ul><p><br></p><p><strong>Quotes:</strong></p><p>“Machine learning in our space is almost never a complete solution. It's a way to augment our chemists [and] our biologists [to] try to make them capable of solving problems that were unsolved before.” — Kfir Schreiber</p><p><br></p><p>“One of the best things about DeepCure [is the] very tight collaboration between the domain experts and our machine learning scientists.” — Kfir Schreiber</p><p><br></p><p>“Your average machine-learning scientist doesn't have chemistry intuition. We need this feedback and we need to integrate this feedback back into our models to make the predictions make sense.” — Kfir Schreiber</p><p><br></p><p>“Focus on the problem, focus on the value, and work your way backwards to the best tools to use.” — Kfir Schreiber</p><p><br></p><p><strong>Links:</strong></p><p><a href="https://deepcure.com/">DeepCure<br></a><a href="https://www.linkedin.com/in/kfir-schreiber/">Kfir Schreiber on LinkedIn</a></p><p><br><strong>Resources for Computer Vision Teams:</strong></p><p><a href="https://www.linkedin.com/in/hdcouture/">LinkedIn</a> – Connect with Heather.</p><p><a href="https://pixelscientia.com/newsletter/">Computer Vision Insights Newsletter</a> – A biweekly newsletter to help bring the latest machine learning and computer vision research to applications in people and planetary health.</p><p><a href="http://pixelscientia.com/services/strategy-session/">Computer Vision Strategy Session</a> – Not sure how to advance your computer vision project? Get unstuck with a clear set of next steps. Schedule a 1 hour strategy session now to advance your project.</p>]]>
      </itunes:summary>
      <itunes:keywords>machine learning, immune disease, drug discovery</itunes:keywords>
      <itunes:explicit>false</itunes:explicit>
      <podcast:transcript url="https://share.transistor.fm/s/eb495280/transcript.txt" type="text/plain"/>
    </item>
    <item>
      <title>Unlocking Unstructured Health Data with David Sontag from Layer Health</title>
      <itunes:episode>111</itunes:episode>
      <podcast:episode>111</podcast:episode>
      <itunes:title>Unlocking Unstructured Health Data with David Sontag from Layer Health</itunes:title>
      <itunes:episodeType>full</itunes:episodeType>
      <guid isPermaLink="false">78655902-492b-4405-a2ea-df1a85775326</guid>
      <link>https://pixelscientia.com/podcast/unlocking-unstructured-health-data-with-david-sontag-from-layer-health/</link>
      <description>
        <![CDATA[<p>What if we could unlock the hidden potential of unstructured health data to improve patient outcomes? In this episode, I sit down with David Sontag, co-founder and CEO of Layer Health, to discuss the transformative role of AI in healthcare. David, an MIT professor (on leave) and leading machine learning researcher, delves into how Layer Health addresses one of healthcare’s most persistent challenges: extracting actionable insights from unstructured medical data. In our conversation, David explains how Layer Health’s AI platform automates complex chart review tasks, tackles data generalization issues across diverse healthcare systems, and overcomes challenges like bias and dataset shifts. We explore Layer Health’s groundbreaking use of large language models (LLMs), the importance of scalable AI solutions, and the integration of AI into clinical workflows. Join us to discover how Layer Health is reducing administrative burdens, improving data accessibility, and shaping the future of AI-powered healthcare with David Sontag.</p><p><br></p><p><strong>Key Points:</strong></p><ul><li>Hear about David's career journey from MIT professor to CEO of Layer Health.</li><li>How Layer Health transforms chart reviews and enhances healthcare workflows.</li><li>The role of large language models in solving the company's scalability problems.</li><li>Learn about Layer Health's approach to benchmarking performance for institutions.</li><li>Explore how the company navigates dataset shifts and ensures robust model performance.</li><li>Discover Layer Health's strategies to identify and mitigate bias in clinical AI models.</li><li>Find out about the challenges of implementing reasoning across diverse medical records.</li><li>Why building trust through data transparency, auditing, and compliance are essential.</li><li>David’s advice for AI startup leaders on balancing research with practical implementation.</li><li>Layer Health's long-term vision for reshaping healthcare 
and improving patient outcomes.</li></ul><p><br></p><p><strong>Quotes:</strong></p><p>“Our vision for Layer Health is to transform healthcare with artificial intelligence, really building upon all of the work that we've been doing over the past decade in the AI and health field and academic space.” — David Sontag</p><p><br></p><p>“What we realized very quickly is that where [Layer Health] would have the biggest impact was bringing the right information to the physician's fingertips at the right point in time.” — David Sontag</p><p><br></p><p>“We're using large language models to drive the abstraction of those clinical variables that we need for these either retrospective or prospective use cases.” — David Sontag</p><p><br></p><p>“Where I think we're going to see the biggest source of bias is likely going to be not along the traditional demographic-related quantities, but rather on more clinical quantities.” — David Sontag</p><p><br></p><p>“A lot of the friction that we currently see in healthcare, [Layer Health] is going to really take a big bite out of [it].” — David Sontag</p><p><br></p><p><strong>Links:</strong></p><p><a href="https://people.csail.mit.edu/dsontag/">David Sontag</a></p><p><a href="https://www.linkedin.com/in/david-sontag/">David Sontag on LinkedIn</a></p><p><a href="https://www.layerhealth.com/">Layer Health</a></p><p><br><strong>Resources for Computer Vision Teams:</strong></p><p><a href="https://www.linkedin.com/in/hdcouture/">LinkedIn</a> – Connect with Heather.</p><p><a href="https://pixelscientia.com/newsletter/">Computer Vision Insights Newsletter</a> – A biweekly newsletter to help bring the latest machine learning and computer vision research to applications in people and planetary health.</p><p><a href="http://pixelscientia.com/services/strategy-session/">Computer Vision Strategy Session</a> – Not sure how to advance your computer vision project? Get unstuck with a clear set of next steps. 
Schedule a 1 hour strategy session now to advance your project.</p>]]>
      </description>
      <content:encoded>
        <![CDATA[<p>What if we could unlock the hidden potential of unstructured health data to improve patient outcomes? In this episode, I sit down with David Sontag, co-founder and CEO of Layer Health, to discuss the transformative role of AI in healthcare. David, an MIT professor (on leave) and leading machine learning researcher, delves into how Layer Health addresses one of healthcare’s most persistent challenges: extracting actionable insights from unstructured medical data. In our conversation, David explains how Layer Health’s AI platform automates complex chart review tasks, tackles data generalization issues across diverse healthcare systems, and overcomes challenges like bias and dataset shifts. We explore Layer Health’s groundbreaking use of large language models (LLMs), the importance of scalable AI solutions, and the integration of AI into clinical workflows. Join us to discover how Layer Health is reducing administrative burdens, improving data accessibility, and shaping the future of AI-powered healthcare with David Sontag.</p><p><br></p><p><strong>Key Points:</strong></p><ul><li>Hear about David's career journey from MIT professor to CEO of Layer Health.</li><li>How Layer Health transforms chart reviews and enhances healthcare workflows.</li><li>The role of large language models in solving the company's scalability problems.</li><li>Learn about Layer Health's approach to benchmarking performance for institutions.</li><li>Explore how the company navigates dataset shifts and ensures robust model performance.</li><li>Discover Layer Health's strategies to identify and mitigate bias in clinical AI models.</li><li>Find out about the challenges of implementing reasoning across diverse medical records.</li><li>Why building trust through data transparency, auditing, and compliance are essential.</li><li>David’s advice for AI startup leaders on balancing research with practical implementation.</li><li>Layer Health's long-term vision for reshaping healthcare 
and improving patient outcomes.</li></ul><p><br></p><p><strong>Quotes:</strong></p><p>“Our vision for Layer Health is to transform healthcare with artificial intelligence, really building upon all of the work that we've been doing over the past decade in the AI and health field and academic space.” — David Sontag</p><p><br></p><p>“What we realized very quickly is that where [Layer Health] would have the biggest impact was bringing the right information to the physician's fingertips at the right point in time.” — David Sontag</p><p><br></p><p>“We're using large language models to drive the abstraction of those clinical variables that we need for these either retrospective or prospective use cases.” — David Sontag</p><p><br></p><p>“Where I think we're going to see the biggest source of bias is likely going to be not along the traditional demographic-related quantities, but rather on more clinical quantities.” — David Sontag</p><p><br></p><p>“A lot of the friction that we currently see in healthcare, [Layer Health] is going to really take a big bite out of [it].” — David Sontag</p><p><br></p><p><strong>Links:</strong></p><p><a href="https://people.csail.mit.edu/dsontag/">David Sontag</a></p><p><a href="https://www.linkedin.com/in/david-sontag/">David Sontag on LinkedIn</a></p><p><a href="https://www.layerhealth.com/">Layer Health</a></p><p><br><strong>Resources for Computer Vision Teams:</strong></p><p><a href="https://www.linkedin.com/in/hdcouture/">LinkedIn</a> – Connect with Heather.</p><p><a href="https://pixelscientia.com/newsletter/">Computer Vision Insights Newsletter</a> – A biweekly newsletter to help bring the latest machine learning and computer vision research to applications in people and planetary health.</p><p><a href="http://pixelscientia.com/services/strategy-session/">Computer Vision Strategy Session</a> – Not sure how to advance your computer vision project? Get unstuck with a clear set of next steps. 
Schedule a 1 hour strategy session now to advance your project.</p>]]>
      </content:encoded>
      <pubDate>Mon, 09 Dec 2024 06:00:00 -0500</pubDate>
      <author>Heather D. Couture</author>
      <enclosure url="https://2.gum.fm/op3.dev/e/pdcn.co/e/pdst.fm/e/dts.podtrac.com/redirect.mp3/media.transistor.fm/92eaf21e/a63034a7.mp3" length="39248803" type="audio/mpeg"/>
      <itunes:author>Heather D. Couture</itunes:author>
      <itunes:image href="https://img.transistor.fm/vrsVGKDFYG-JkVrurrqI7PmpfMCUQk0NrLzYCy15jcU/rs:fill:0:0:1/w:1400/h:1400/q:60/mb:500000/aHR0cHM6Ly9pbWct/dXBsb2FkLXByb2R1/Y3Rpb24udHJhbnNp/c3Rvci5mbS9iNGYx/NTBkN2I1ZGQxZWFm/NTMyYzhkM2MzNTE0/Y2Q1Ni5wbmc.jpg"/>
      <itunes:duration>1634</itunes:duration>
      <itunes:summary>
        <![CDATA[<p>What if we could unlock the hidden potential of unstructured health data to improve patient outcomes? In this episode, I sit down with David Sontag, co-founder and CEO of Layer Health, to discuss the transformative role of AI in healthcare. David, an MIT professor (on leave) and leading machine learning researcher, delves into how Layer Health addresses one of healthcare’s most persistent challenges: extracting actionable insights from unstructured medical data. In our conversation, David explains how Layer Health’s AI platform automates complex chart review tasks, tackles data generalization issues across diverse healthcare systems, and overcomes challenges like bias and dataset shifts. We explore Layer Health’s groundbreaking use of large language models (LLMs), the importance of scalable AI solutions, and the integration of AI into clinical workflows. Join us to discover how Layer Health is reducing administrative burdens, improving data accessibility, and shaping the future of AI-powered healthcare with David Sontag.</p><p><br></p><p><strong>Key Points:</strong></p><ul><li>Hear about David's career journey from MIT professor to CEO of Layer Health.</li><li>How Layer Health transforms chart reviews and enhances healthcare workflows.</li><li>The role of large language models in solving the company's scalability problems.</li><li>Learn about Layer Health's approach to benchmarking performance for institutions.</li><li>Explore how the company navigates dataset shifts and ensures robust model performance.</li><li>Discover Layer Health's strategies to identify and mitigate bias in clinical AI models.</li><li>Find out about the challenges of implementing reasoning across diverse medical records.</li><li>Why building trust through data transparency, auditing, and compliance are essential.</li><li>David’s advice for AI startup leaders on balancing research with practical implementation.</li><li>Layer Health's long-term vision for reshaping healthcare 
and improving patient outcomes.</li></ul><p><br></p><p><strong>Quotes:</strong></p><p>“Our vision for Layer Health is to transform healthcare with artificial intelligence, really building upon all of the work that we've been doing over the past decade in the AI and health field and academic space.” — David Sontag</p><p><br></p><p>“What we realized very quickly is that where [Layer Health] would have the biggest impact was bringing the right information to the physician's fingertips at the right point in time.” — David Sontag</p><p><br></p><p>“We're using large language models to drive the abstraction of those clinical variables that we need for these either retrospective or prospective use cases.” — David Sontag</p><p><br></p><p>“Where I think we're going to see the biggest source of bias is likely going to be not along the traditional demographic-related quantities, but rather on more clinical quantities.” — David Sontag</p><p><br></p><p>“A lot of the friction that we currently see in healthcare, [Layer Health] is going to really take a big bite out of [it].” — David Sontag</p><p><br></p><p><strong>Links:</strong></p><p><a href="https://people.csail.mit.edu/dsontag/">David Sontag</a></p><p><a href="https://www.linkedin.com/in/david-sontag/">David Sontag on LinkedIn</a></p><p><a href="https://www.layerhealth.com/">Layer Health</a></p><p><br><strong>Resources for Computer Vision Teams:</strong></p><p><a href="https://www.linkedin.com/in/hdcouture/">LinkedIn</a> – Connect with Heather.</p><p><a href="https://pixelscientia.com/newsletter/">Computer Vision Insights Newsletter</a> – A biweekly newsletter to help bring the latest machine learning and computer vision research to applications in people and planetary health.</p><p><a href="http://pixelscientia.com/services/strategy-session/">Computer Vision Strategy Session</a> – Not sure how to advance your computer vision project? Get unstuck with a clear set of next steps. 
Schedule a 1 hour strategy session now to advance your project.</p>]]>
      </itunes:summary>
      <itunes:keywords>machine learning, healthcare, language models</itunes:keywords>
      <itunes:explicit>false</itunes:explicit>
      <podcast:transcript url="https://share.transistor.fm/s/92eaf21e/transcript.txt" type="text/plain"/>
    </item>
    <item>
      <title>Discovering Protein Drug Candidates with Hanadie Yousef from Juvena Therapeutics</title>
      <itunes:episode>110</itunes:episode>
      <podcast:episode>110</podcast:episode>
      <itunes:title>Discovering Protein Drug Candidates with Hanadie Yousef from Juvena Therapeutics</itunes:title>
      <itunes:episodeType>full</itunes:episodeType>
      <guid isPermaLink="false">68a52253-eae6-48ac-9a55-e1b60d274175</guid>
      <link>https://pixelscientia.com/podcast/discovering-protein-drug-candidates-with-hanadie-yousef-from-juvena-therapeutics/</link>
      <description>
        <![CDATA[<p>How can advancements in biotechnology and machine learning lead to revolutionary treatments for age-related diseases? In this episode, I speak with Hanadie Yousef, CEO and Co-Founder of Juvena Therapeutics, to discuss her work on protein-based therapeutics. Hanadie, a neurobiologist specializing in aging and tissue degeneration, has pioneered research at Juvena to identify regenerative proteins that can restore tissue function and combat chronic diseases.</p><p>In our conversation, Hanadie details Juvena’s AI-driven platform that identifies, validates, and engineers protein candidates with therapeutic potential. We explore the power of machine learning models in drug discovery, the challenges of working with multi-omics data, and the potential for new treatments to revolutionize healthcare by targeting disease at the molecular level. Join us to hear how Juvena Therapeutics is setting a new standard in precision medicine aimed at helping individuals age with vitality.</p><p><br></p><p><strong>Key Points:</strong></p><ul><li>The founding story of Juvena Therapeutics and its mission to restore tissue health.</li><li>How the company leverages AI to identify regenerative proteins from stem cell secretions.</li><li>Learn how Juvena's machine learning models enable targeted protein engineering.</li><li>Explore the different types of data that Juvena utilizes and how they are structured.</li><li>Hear about the benefits of in-house data generation for model training and validation.</li><li>Discover the challenges of generating sufficient data for accurate model predictions.</li><li>Technological advancements in proteomics and multi-omics that support its platform.</li><li>Hanadie shares advice for AI-driven startups and her hopes for Juvena's future impact.</li></ul><p><br></p><p><strong>Quotes:</strong></p><p>“Juvena is part of really, a new approach to leveraging the biology of aging and underlying mechanisms associated with why our tissues decline in 
function, in order to target this biology so that we can treat a broad swath of diseases.” — Hanadie Yousef</p><p><br></p><p>“That's ultimately the goal of Juvena, to really enable people to age with dignity, to continue to contribute to society, and to really maintain their health until the very end.” — Hanadie Yousef</p><p><br></p><p>“Ultimately, [machine learning is] leveraged at every stage of the process from in silico prediction, and screening through to the actual engineering and drug development.” — Hanadie Yousef</p><p><br></p><p>“When it comes to wet lab data generation, sometimes you're really limited by just the quantity of data that you can generate.” — Hanadie Yousef</p><p><br></p><p>“AI isn't the solution to everything. Oftentimes, you do still want to have that human in the loop and really test the accuracy of these models.” — Hanadie Yousef</p><p><br></p><p><strong>Links:</strong></p><p><a href="https://www.linkedin.com/in/hanadie-yousef-a2120026/">Hanadie Yousef on LinkedIn</a></p><p><a href="https://www.juvenatherapeutics.com">Juvena Therapeutics</a></p><p><a href="https://www.linkedin.com/company/juvena-therapeutics/">Juvena Therapeutics on LinkedIn</a></p><p><br><strong>Resources for Computer Vision Teams:</strong></p><p><a href="https://www.linkedin.com/in/hdcouture/">LinkedIn</a> – Connect with Heather.</p><p><a href="https://pixelscientia.com/newsletter/">Computer Vision Insights Newsletter</a> – A biweekly newsletter to help bring the latest machine learning and computer vision research to applications in people and planetary health.</p><p><a href="http://pixelscientia.com/services/strategy-session/">Computer Vision Strategy Session</a> – Not sure how to advance your computer vision project? Get unstuck with a clear set of next steps. Schedule a 1 hour strategy session now to advance your project.</p>]]>
      </description>
      <content:encoded>
        <![CDATA[<p>How can advancements in biotechnology and machine learning lead to revolutionary treatments for age-related diseases? In this episode, I speak with Hanadie Yousef, CEO and Co-Founder of Juvena Therapeutics, to discuss her work on protein-based therapeutics. Hanadie, a neurobiologist specializing in aging and tissue degeneration, has pioneered research at Juvena to identify regenerative proteins that can restore tissue function and combat chronic diseases.</p><p>In our conversation, Hanadie details Juvena’s AI-driven platform that identifies, validates, and engineers protein candidates with therapeutic potential. We explore the power of machine learning models in drug discovery, the challenges of working with multi-omics data, and the potential for new treatments to revolutionize healthcare by targeting disease at the molecular level. Join us to hear how Juvena Therapeutics is setting a new standard in precision medicine aimed at helping individuals age with vitality.</p><p><br></p><p><strong>Key Points:</strong></p><ul><li>The founding story of Juvena Therapeutics and its mission to restore tissue health.</li><li>How the company leverages AI to identify regenerative proteins from stem cell secretions.</li><li>Learn how Juvena's machine learning models enable targeted protein engineering.</li><li>Explore the different types of data that Juvena utilizes and how they are structured.</li><li>Hear about the benefits of in-house data generation for model training and validation.</li><li>Discover the challenges of generating sufficient data for accurate model predictions.</li><li>Technological advancements in proteomics and multi-omics that support its platform.</li><li>Hanadie shares advice for AI-driven startups and her hopes for Juvena's future impact.</li></ul><p><br></p><p><strong>Quotes:</strong></p><p>“Juvena is part of really, a new approach to leveraging the biology of aging and underlying mechanisms associated with why our tissues decline in 
function, in order to target this biology so that we can treat a broad swath of diseases.” — Hanadie Yousef</p><p><br></p><p>“That's ultimately the goal of Juvena, to really enable people to age with dignity, to continue to contribute to society, and to really maintain their health until the very end.” — Hanadie Yousef</p><p><br></p><p>“Ultimately, [machine learning is] leveraged at every stage of the process from in silico prediction, and screening through to the actual engineering and drug development.” — Hanadie Yousef</p><p><br></p><p>“When it comes to wet lab data generation, sometimes you're really limited by just the quantity of data that you can generate.” — Hanadie Yousef</p><p><br></p><p>“AI isn't the solution to everything. Oftentimes, you do still want to have that human in the loop and really test the accuracy of these models.” — Hanadie Yousef</p><p><br></p><p><strong>Links:</strong></p><p><a href="https://www.linkedin.com/in/hanadie-yousef-a2120026/">Hanadie Yousef on LinkedIn</a></p><p><a href="https://www.juvenatherapeutics.com">Juvena Therapeutics</a></p><p><a href="https://www.linkedin.com/company/juvena-therapeutics/">Juvena Therapeutics on LinkedIn</a></p><p><br><strong>Resources for Computer Vision Teams:</strong></p><p><a href="https://www.linkedin.com/in/hdcouture/">LinkedIn</a> – Connect with Heather.</p><p><a href="https://pixelscientia.com/newsletter/">Computer Vision Insights Newsletter</a> – A biweekly newsletter to help bring the latest machine learning and computer vision research to applications in people and planetary health.</p><p><a href="http://pixelscientia.com/services/strategy-session/">Computer Vision Strategy Session</a> – Not sure how to advance your computer vision project? Get unstuck with a clear set of next steps. Schedule a 1 hour strategy session now to advance your project.</p>]]>
      </content:encoded>
      <pubDate>Mon, 25 Nov 2024 06:00:00 -0500</pubDate>
      <author>Heather D. Couture</author>
      <enclosure url="https://2.gum.fm/op3.dev/e/pdcn.co/e/pdst.fm/e/dts.podtrac.com/redirect.mp3/media.transistor.fm/f4b9629f/41104055.mp3" length="28201444" type="audio/mpeg"/>
      <itunes:author>Heather D. Couture</itunes:author>
      <itunes:image href="https://img.transistor.fm/q2nDxbTsz9j1DK70CJc8gvGwi7Qn4z_GCO34hzLNCf4/rs:fill:0:0:1/w:1400/h:1400/q:60/mb:500000/aHR0cHM6Ly9pbWct/dXBsb2FkLXByb2R1/Y3Rpb24udHJhbnNp/c3Rvci5mbS84MWE5/OWI4OTgxOTdiN2Jl/ZGE5ZDU4ZjU1M2Mz/Y2ZjNS5wbmc.jpg"/>
      <itunes:duration>1172</itunes:duration>
      <itunes:summary>
        <![CDATA[<p>How can advancements in biotechnology and machine learning lead to revolutionary treatments for age-related diseases? In this episode, I speak with Hanadie Yousef, CEO and Co-Founder of Juvena Therapeutics, to discuss her work on protein-based therapeutics. Hanadie, a neurobiologist specializing in aging and tissue degeneration, has pioneered research at Juvena to identify regenerative proteins that can restore tissue function and combat chronic diseases.</p><p>In our conversation, Hanadie details Juvena’s AI-driven platform that identifies, validates, and engineers protein candidates with therapeutic potential. We explore the power of machine learning models in drug discovery, the challenges of working with multi-omics data, and the potential for new treatments to revolutionize healthcare by targeting disease at the molecular level. Join us to hear how Juvena Therapeutics is setting a new standard in precision medicine aimed at helping individuals age with vitality.</p><p><br></p><p><strong>Key Points:</strong></p><ul><li>The founding story of Juvena Therapeutics and its mission to restore tissue health.</li><li>How the company leverages AI to identify regenerative proteins from stem cell secretions.</li><li>Learn how Juvena's machine learning models enable targeted protein engineering.</li><li>Explore the different types of data that Juvena utilizes and how they are structured.</li><li>Hear about the benefits of in-house data generation for model training and validation.</li><li>Discover the challenges of generating sufficient data for accurate model predictions.</li><li>Technological advancements in proteomics and multi-omics that support its platform.</li><li>Hanadie shares advice for AI-driven startups and her hopes for Juvena's future impact.</li></ul><p><br></p><p><strong>Quotes:</strong></p><p>“Juvena is part of really, a new approach to leveraging the biology of aging and underlying mechanisms associated with why our tissues decline in 
function, in order to target this biology so that we can treat a broad swath of diseases.” — Hanadie Yousef</p><p><br></p><p>“That's ultimately the goal of Juvena, to really enable people to age with dignity, to continue to contribute to society, and to really maintain their health until the very end.” — Hanadie Yousef</p><p><br></p><p>“Ultimately, [machine learning is] leveraged at every stage of the process from in silico prediction, and screening through to the actual engineering and drug development.” — Hanadie Yousef</p><p><br></p><p>“When it comes to wet lab data generation, sometimes you're really limited by just the quantity of data that you can generate.” — Hanadie Yousef</p><p><br></p><p>“AI isn't the solution to everything. Oftentimes, you do still want to have that human in the loop and really test the accuracy of these models.” — Hanadie Yousef</p><p><br></p><p><strong>Links:</strong></p><p><a href="https://www.linkedin.com/in/hanadie-yousef-a2120026/">Hanadie Yousef on LinkedIn</a></p><p><a href="https://www.juvenatherapeutics.com">Juvena Therapeutics</a></p><p><a href="https://www.linkedin.com/company/juvena-therapeutics/">Juvena Therapeutics on LinkedIn</a></p><p><br><strong>Resources for Computer Vision Teams:</strong></p><p><a href="https://www.linkedin.com/in/hdcouture/">LinkedIn</a> – Connect with Heather.</p><p><a href="https://pixelscientia.com/newsletter/">Computer Vision Insights Newsletter</a> – A biweekly newsletter to help bring the latest machine learning and computer vision research to applications in people and planetary health.</p><p><a href="http://pixelscientia.com/services/strategy-session/">Computer Vision Strategy Session</a> – Not sure how to advance your computer vision project? Get unstuck with a clear set of next steps. Schedule a 1 hour strategy session now to advance your project.</p>]]>
      </itunes:summary>
      <itunes:keywords>machine learning, drug discovery</itunes:keywords>
      <itunes:explicit>false</itunes:explicit>
      <podcast:transcript url="https://share.transistor.fm/s/f4b9629f/transcript.txt" type="text/plain"/>
    </item>
    <item>
      <title>Real-World Evidence for Healthcare with Brigham Hyde from Atropos Health</title>
      <itunes:episode>109</itunes:episode>
      <podcast:episode>109</podcast:episode>
      <itunes:title>Real-World Evidence for Healthcare with Brigham Hyde from Atropos Health</itunes:title>
      <itunes:episodeType>full</itunes:episodeType>
      <guid isPermaLink="false">f800d980-598e-4351-ba60-fd0b1f91bb41</guid>
      <link>https://pixelscientia.com/podcast/real-world-evidence-for-healthcare-with-brigham-hyde-from-atropos-health/</link>
      <description>
        <![CDATA[<p>To succeed at an AI startup, you have to be able to show your work and its value. During this episode, I am joined by Brigham Hyde, Co-Founder and CEO of Atropos Health, to talk about his app that gathers real-world evidence for healthcare. He is an entrepreneur, operator, and investor who is deeply immersed in the potential of data and AI. Join us as he shares his journey to creating Atropos Health, why he believes AI is important for healthcare, and the potential it holds to bridge the evidence gap. We discuss how the lack of diversity in healthcare data has impacted patient outcomes leading up to this point and explore some of the methods Atropos uses to get the most out of machine learning. We discuss the AI data-gathering process, how each setup is validated and adapted, and how he measures the impact of his technology. In closing, he shares advice for other leaders of AI-powered startups and offers his vision for the future impact of Atropos.</p><p><br></p><p><strong>Key Points:</strong></p><ul><li>Welcoming Brigham Hyde, co-founder and CEO of Atropos Health.</li><li>His journey to creating Atropos Health after working in other medical AI arenas. </li><li>Why AI is important for healthcare: the evidence gap. </li><li>Atropos’s perspective on the role of real-world evidence.</li><li>How the lack of diversity in healthcare data sets impacts patient outcomes.</li><li>Methods Atropos uses to leverage machine learning to ensure that patient populations are supported.</li><li>The data-gathering process.</li><li>How the setup is validated and adapted according to need.</li><li>Measuring the impact of the technology. </li><li>Advice for other leaders of AI-powered startups. </li><li>Where Brigham foresees the impact of Atropos in three to five years. 
</li></ul><p><br></p><p><strong>Quotes:</strong></p><p>“At Atropos, we focus on the automation and generation of high-quality real-world evidence to support clinical decision-making with the dream of creating personalized evidence for everyone.” — Brigham Hyde</p><p><br></p><p>“We see the role of real-world evidence and observational research as a great way to supplement that gap.” — Brigham Hyde</p><p><br></p><p>“It's our ability to create that evidence, transparently show you the populations that are being used and the bias that is involved, and the techniques to remove that bias that are the key.” — Brigham Hyde</p><p><br></p><p>“You've got to be able to show how what you're doing works, that it's not biased, and that it's applicable to the health system you're working with, and it's got to be done in extremely high quality.” — Brigham Hyde</p><p><br></p><p><strong>Links:</strong></p><p><a href="https://www.linkedin.com/in/brighamhyde/">Brigham Hyde on LinkedIn</a> </p><p><a href="https://x.com/BrighamHyde">Brigham Hyde on X<br></a><a href="https://www.atroposhealth.com/">Atropos Health<br></a><a href="https://www.linkedin.com/company/atropos-health/">Atropos Health on LinkedIn</a></p><p><a href="https://x.com/AtroposHealth?ref_src=twsrc%5Egoogle%7Ctwcamp%5Eserp%7Ctwgr%5Eauthor">Atropos Health on X</a></p><p><br><strong>Resources for Computer Vision Teams:</strong></p><p><a href="https://www.linkedin.com/in/hdcouture/">LinkedIn</a> – Connect with Heather.</p><p><a href="https://pixelscientia.com/newsletter/">Computer Vision Insights Newsletter</a> – A biweekly newsletter to help bring the latest machine learning and computer vision research to applications in people and planetary health.</p><p><a href="http://pixelscientia.com/services/strategy-session/">Computer Vision Strategy Session</a> – Not sure how to advance your computer vision project? Get unstuck with a clear set of next steps. Schedule a 1 hour strategy session now to advance your project.</p>]]>
      </description>
      <content:encoded>
        <![CDATA[<p>To succeed at an AI startup, you have to be able to show your work and its value. During this episode, I am joined by Brigham Hyde, Co-Founder and CEO of Atropos Health, to talk about his app that gathers real-world evidence for healthcare. He is an entrepreneur, operator, and investor who is deeply immersed in the potential of data and AI. Join us as he shares his journey to creating Atropos Health, why he believes AI is important for healthcare, and the potential it holds to bridge the evidence gap. We discuss how the lack of diversity in healthcare data has impacted patient outcomes leading up to this point and explore some of the methods Atropos uses to get the most out of machine learning. We discuss the AI data-gathering process, how each setup is validated and adapted, and how he measures the impact of his technology. In closing, he shares advice for other leaders of AI-powered startups and offers his vision for the future impact of Atropos.</p><p><br></p><p><strong>Key Points:</strong></p><ul><li>Welcoming Brigham Hyde, co-founder and CEO of Atropos Health.</li><li>His journey to creating Atropos Health after working in other medical AI arenas. </li><li>Why AI is important for healthcare: the evidence gap. </li><li>Atropos’s perspective on the role of real-world evidence.</li><li>How the lack of diversity in healthcare data sets impacts patient outcomes.</li><li>Methods Atropos uses to leverage machine learning to ensure that patient populations are supported.</li><li>The data-gathering process.</li><li>How the setup is validated and adapted according to need.</li><li>Measuring the impact of the technology. </li><li>Advice for other leaders of AI-powered startups. </li><li>Where Brigham foresees the impact of Atropos in three to five years. 
</li></ul><p><br></p><p><strong>Quotes:</strong></p><p>“At Atropos, we focus on the automation and generation of high-quality real-world evidence to support clinical decision-making with the dream of creating personalized evidence for everyone.” — Brigham Hyde</p><p><br></p><p>“We see the role of real-world evidence and observational research as a great way to supplement that gap.” — Brigham Hyde</p><p><br></p><p>“It's our ability to create that evidence, transparently show you the populations that are being used and the bias that is involved, and the techniques to remove that bias that are the key.” — Brigham Hyde</p><p><br></p><p>“You've got to be able to show how what you're doing works, that it's not biased, and that it's applicable to the health system you're working with, and it's got to be done in extremely high quality.” — Brigham Hyde</p><p><br></p><p><strong>Links:</strong></p><p><a href="https://www.linkedin.com/in/brighamhyde/">Brigham Hyde on LinkedIn</a> </p><p><a href="https://x.com/BrighamHyde">Brigham Hyde on X<br></a><a href="https://www.atroposhealth.com/">Atropos Health<br></a><a href="https://www.linkedin.com/company/atropos-health/">Atropos Health on LinkedIn</a></p><p><a href="https://x.com/AtroposHealth?ref_src=twsrc%5Egoogle%7Ctwcamp%5Eserp%7Ctwgr%5Eauthor">Atropos Health on X</a></p><p><br><strong>Resources for Computer Vision Teams:</strong></p><p><a href="https://www.linkedin.com/in/hdcouture/">LinkedIn</a> – Connect with Heather.</p><p><a href="https://pixelscientia.com/newsletter/">Computer Vision Insights Newsletter</a> – A biweekly newsletter to help bring the latest machine learning and computer vision research to applications in people and planetary health.</p><p><a href="http://pixelscientia.com/services/strategy-session/">Computer Vision Strategy Session</a> – Not sure how to advance your computer vision project? Get unstuck with a clear set of next steps. Schedule a 1 hour strategy session now to advance your project.</p>]]>
      </content:encoded>
      <pubDate>Mon, 18 Nov 2024 06:00:00 -0500</pubDate>
      <author>heather@pixelscientia.com (Heather D. Couture)</author>
      <enclosure url="https://2.gum.fm/op3.dev/e/pdcn.co/e/pdst.fm/e/dts.podtrac.com/redirect.mp3/media.transistor.fm/2d3443fe/213d8266.mp3" length="16541907" type="audio/mpeg"/>
      <itunes:author>Heather D. Couture</itunes:author>
      <itunes:image href="https://img.transistor.fm/ZTjntsmIW3dCTtnCkPa3bs9edu4tjcrP9j-6cARzQdA/rs:fill:0:0:1/w:1400/h:1400/q:60/mb:500000/aHR0cHM6Ly9pbWct/dXBsb2FkLXByb2R1/Y3Rpb24udHJhbnNp/c3Rvci5mbS8xYmM4/YTQxMjA2MTZjNDg5/OTc4OGZmMWRmNjM1/NjQwNS5qcGVn.jpg"/>
      <itunes:duration>687</itunes:duration>
      <itunes:summary>
        <![CDATA[<p>To succeed at an AI startup, you have to be able to show your work and its value. During this episode, I am joined by Brigham Hyde, Co-Founder and CEO of Atropos Health, to talk about his app that gathers real-world evidence for healthcare. He is an entrepreneur, operator, and investor who is deeply immersed in the potential of data and AI. Join us as he shares his journey to creating Atropos Health, why he believes AI is important for healthcare, and the potential it holds to bridge the evidence gap. We discuss how the lack of diversity in healthcare data has impacted patient outcomes leading up to this point and explore some of the methods Atropos uses to get the most out of machine learning. We discuss the AI data-gathering process, how each setup is validated and adapted, and how he measures the impact of his technology. In closing, he shares advice for other leaders of AI-powered startups and offers his vision for the future impact of Atropos.</p><p><br></p><p><strong>Key Points:</strong></p><ul><li>Welcoming Brigham Hyde, co-founder and CEO of Atropos Health.</li><li>His journey to creating Atropos Health after working in other medical AI arenas. </li><li>Why AI is important for healthcare: the evidence gap. </li><li>Atropos’s perspective on the role of real-world evidence.</li><li>How the lack of diversity in healthcare data sets impacts patient outcomes.</li><li>Methods Atropos uses to leverage machine learning to ensure that patient populations are supported.</li><li>The data-gathering process.</li><li>How the setup is validated and adapted according to need.</li><li>Measuring the impact of the technology. </li><li>Advice for other leaders of AI-powered startups. </li><li>Where Brigham foresees the impact of Atropos in three to five years. 
</li></ul><p><br></p><p><strong>Quotes:</strong></p><p>“At Atropos, we focus on the automation and generation of high-quality real-world evidence to support clinical decision-making with the dream of creating personalized evidence for everyone.” — Brigham Hyde</p><p><br></p><p>“We see the role of real-world evidence and observational research as a great way to supplement that gap.” — Brigham Hyde</p><p><br></p><p>“It's our ability to create that evidence, transparently show you the populations that are being used and the bias that is involved, and the techniques to remove that bias that are the key.” — Brigham Hyde</p><p><br></p><p>“You've got to be able to show how what you're doing works, that it's not biased, and that it's applicable to the health system you're working with, and it's got to be done in extremely high quality.” — Brigham Hyde</p><p><br></p><p><strong>Links:</strong></p><p><a href="https://www.linkedin.com/in/brighamhyde/">Brigham Hyde on LinkedIn</a> </p><p><a href="https://x.com/BrighamHyde">Brigham Hyde on X<br></a><a href="https://www.atroposhealth.com/">Atropos Health<br></a><a href="https://www.linkedin.com/company/atropos-health/">Atropos Health on LinkedIn</a></p><p><a href="https://x.com/AtroposHealth?ref_src=twsrc%5Egoogle%7Ctwcamp%5Eserp%7Ctwgr%5Eauthor">Atropos Health on X</a></p><p><br><strong>Resources for Computer Vision Teams:</strong></p><p><a href="https://www.linkedin.com/in/hdcouture/">LinkedIn</a> – Connect with Heather.</p><p><a href="https://pixelscientia.com/newsletter/">Computer Vision Insights Newsletter</a> – A biweekly newsletter to help bring the latest machine learning and computer vision research to applications in people and planetary health.</p><p><a href="http://pixelscientia.com/services/strategy-session/">Computer Vision Strategy Session</a> – Not sure how to advance your computer vision project? Get unstuck with a clear set of next steps. Schedule a 1 hour strategy session now to advance your project.</p>]]>
      </itunes:summary>
      <itunes:keywords>machine learning, healthcare, real world evidence, clinical trials</itunes:keywords>
      <itunes:explicit>No</itunes:explicit>
      <podcast:transcript url="https://share.transistor.fm/s/2d3443fe/transcript.txt" type="text/plain"/>
    </item>
    <item>
      <title>De-Risking Drug Translation with Jo Varshney from VeriSIM Life</title>
      <itunes:episode>108</itunes:episode>
      <podcast:episode>108</podcast:episode>
      <itunes:title>De-Risking Drug Translation with Jo Varshney from VeriSIM Life</itunes:title>
      <itunes:episodeType>full</itunes:episodeType>
      <guid isPermaLink="false">80026573-3952-4590-843b-36506aaf00c9</guid>
      <link>https://pixelscientia.com/podcast/de-risking-drug-translation-with-jo-varshney-from-verisim-life/</link>
      <description>
        <![CDATA[<p>As machine learning becomes increasingly widespread, AI holds the potential to revolutionize drug development, making it faster, safer, and more affordable than ever. In this episode, I'm joined by Jo Varshney, Founder and CEO of VeriSIM Life, to explore how her company is transforming drug translation through hybrid AI.</p><p>With her unique blend of expertise as a veterinarian and computer scientist, Jo leverages biology, chemistry, and machine learning knowledge to tackle the translational gap between animal models and human patients. You’ll learn about VeriSIM Life’s innovative approach to overcoming data limitations, synthesizing new data, and applying ML models tailored to various diseases, from rare conditions to neurological disorders. Jo also reveals VeriSIM’s unique translational index score, a tool that predicts clinical trial success rates and helps pharma companies identify promising drugs early and avoid costly failures.</p><p>For anyone curious about the future of AI in healthcare, this episode offers a fascinating glimpse into the world of biotech innovation. 
To discover how VeriSIM Life’s technology is poised to bring life-saving treatments to patients faster and more safely than ever before, be sure to tune in today!</p><p><br></p><p><strong>Key Points:</strong></p><ul><li>How Jo's background and interest in translational challenges led her to found VeriSIM Life.</li><li>Addressing translational gaps between animal models and human trials with hybrid AI.</li><li>Combining biology-based models with ML to enhance drug testing accuracy.</li><li>Small molecules, peptides, large molecules, clinical trial outcomes, and other data inputs.</li><li>Ways that VeriSIM’s models are tailored per data type, ensuring maximum accuracy.</li><li>Insight into the challenge of overcoming data gaps and how VeriSIM solves it.</li><li>How hybrid AI reduces overfitting, boosting model accuracy in data-limited scenarios.</li><li>What goes into validating VeriSIM’s models through partnerships and external testing.</li><li>Measuring the impact of this technology with VeriSIM’s translational index score.</li><li>Jo’s advice for AI-powered startups: be specific, validate technology, and be adaptable.</li><li>Her predictions for the impact VeriSIM will have in the next few years.</li></ul><p><br></p><p><strong>Quotes:</strong></p><p>“[Hybrid AI] helps us not only unravel newer methods and mechanisms of actions or novel targets but also helps us identify better drug candidates that could eventually be safer and more effective in human patients.” — Jo Varshney</p><p><br></p><p>“Biology is complex. 
We need to understand it enough to create a codified version of that biology.” — Jo Varshney</p><p><br></p><p>“If you're just using machine learning-based methods, you may not get the right features to see the accuracy that you would see with the hybrid AI approach that we take.” — Jo Varshney</p><p><br></p><p>“Focus on validation and showing some real-world outcomes [rather than] just building the marketing outcome because, ultimately, we want it to get to the patients. We want to know if the technology really works. If it doesn't work, you can still pivot.” — Jo Varshney</p><p><br></p><p><strong>Links:</strong></p><p><a href="https://www.verisimlife.com/">VeriSIM Life</a></p><p><a href="https://www.linkedin.com/in/jyotika-jo-varshney">Jo Varshney on LinkedIn</a></p><p><a href="https://x.com/jo_verisim">Jo Varshney on X</a></p><p><br><strong>Resources for Computer Vision Teams:</strong></p><p><a href="https://www.linkedin.com/in/hdcouture/">LinkedIn</a> – Connect with Heather.</p><p><a href="https://pixelscientia.com/newsletter/">Computer Vision Insights Newsletter</a> – A biweekly newsletter to help bring the latest machine learning and computer vision research to applications in people and planetary health.</p><p><a href="http://pixelscientia.com/services/strategy-session/">Computer Vision Strategy Session</a> – Not sure how to advance your computer vision project? Get unstuck with a clear set of next steps. Schedule a 1 hour strategy session now to advance your project.</p>]]>
      </description>
      <content:encoded>
        <![CDATA[<p>As machine learning becomes increasingly widespread, AI holds the potential to revolutionize drug development, making it faster, safer, and more affordable than ever. In this episode, I'm joined by Jo Varshney, Founder and CEO of VeriSIM Life, to explore how her company is transforming drug translation through hybrid AI.</p><p>With her unique blend of expertise as a veterinarian and computer scientist, Jo leverages biology, chemistry, and machine learning knowledge to tackle the translational gap between animal models and human patients. You’ll learn about VeriSIM Life’s innovative approach to overcoming data limitations, synthesizing new data, and applying ML models tailored to various diseases, from rare conditions to neurological disorders. Jo also reveals VeriSIM’s unique translational index score, a tool that predicts clinical trial success rates and helps pharma companies identify promising drugs early and avoid costly failures.</p><p>For anyone curious about the future of AI in healthcare, this episode offers a fascinating glimpse into the world of biotech innovation. 
To discover how VeriSIM Life’s technology is poised to bring life-saving treatments to patients faster and more safely than ever before, be sure to tune in today!</p><p><br></p><p><strong>Key Points:</strong></p><ul><li>How Jo's background and interest in translational challenges led her to found VeriSIM Life.</li><li>Addressing translational gaps between animal models and human trials with hybrid AI.</li><li>Combining biology-based models with ML to enhance drug testing accuracy.</li><li>Small molecules, peptides, large molecules, clinical trial outcomes, and other data inputs.</li><li>Ways that VeriSIM’s models are tailored per data type, ensuring maximum accuracy.</li><li>Insight into the challenge of overcoming data gaps and how VeriSIM solves it.</li><li>How hybrid AI reduces overfitting, boosting model accuracy in data-limited scenarios.</li><li>What goes into validating VeriSIM’s models through partnerships and external testing.</li><li>Measuring the impact of this technology with VeriSIM’s translational index score.</li><li>Jo’s advice for AI-powered startups: be specific, validate technology, and be adaptable.</li><li>Her predictions for the impact VeriSIM will have in the next few years.</li></ul><p><br></p><p><strong>Quotes:</strong></p><p>“[Hybrid AI] helps us not only unravel newer methods and mechanisms of actions or novel targets but also helps us identify better drug candidates that could eventually be safer and more effective in human patients.” — Jo Varshney</p><p><br></p><p>“Biology is complex. 
We need to understand it enough to create a codified version of that biology.” — Jo Varshney</p><p><br></p><p>“If you're just using machine learning-based methods, you may not get the right features to see the accuracy that you would see with the hybrid AI approach that we take.” — Jo Varshney</p><p><br></p><p>“Focus on validation and showing some real-world outcomes [rather than] just building the marketing outcome because, ultimately, we want it to get to the patients. We want to know if the technology really works. If it doesn't work, you can still pivot.” — Jo Varshney</p><p><br></p><p><strong>Links:</strong></p><p><a href="https://www.verisimlife.com/">VeriSIM Life</a></p><p><a href="https://www.linkedin.com/in/jyotika-jo-varshney">Jo Varshney on LinkedIn</a></p><p><a href="https://x.com/jo_verisim">Jo Varshney on X</a></p><p><br><strong>Resources for Computer Vision Teams:</strong></p><p><a href="https://www.linkedin.com/in/hdcouture/">LinkedIn</a> – Connect with Heather.</p><p><a href="https://pixelscientia.com/newsletter/">Computer Vision Insights Newsletter</a> – A biweekly newsletter to help bring the latest machine learning and computer vision research to applications in people and planetary health.</p><p><a href="http://pixelscientia.com/services/strategy-session/">Computer Vision Strategy Session</a> – Not sure how to advance your computer vision project? Get unstuck with a clear set of next steps. Schedule a 1 hour strategy session now to advance your project.</p>]]>
      </content:encoded>
      <pubDate>Mon, 11 Nov 2024 06:00:00 -0500</pubDate>
      <author>heather@pixelscientia.com (Heather D. Couture)</author>
      <enclosure url="https://2.gum.fm/op3.dev/e/pdcn.co/e/pdst.fm/e/dts.podtrac.com/redirect.mp3/media.transistor.fm/0c82319e/d96a570e.mp3" length="41350557" type="audio/mpeg"/>
      <itunes:author>Heather D. Couture</itunes:author>
      <itunes:image href="https://img.transistor.fm/19uYgA9QhhPWlkeWF8QlL-qZAZyi1D8bh0zRyD8FY00/rs:fill:0:0:1/w:1400/h:1400/q:60/mb:500000/aHR0cHM6Ly9pbWct/dXBsb2FkLXByb2R1/Y3Rpb24udHJhbnNp/c3Rvci5mbS8yZjJl/ZTgzMThkMzE5YjQy/NTZhOTU1ZWUyMjUw/NTljOS5qcGVn.jpg"/>
      <itunes:duration>1721</itunes:duration>
      <itunes:summary>
        <![CDATA[<p>As machine learning becomes increasingly widespread, AI holds the potential to revolutionize drug development, making it faster, safer, and more affordable than ever. In this episode, I'm joined by Jo Varshney, Founder and CEO of VeriSIM Life, to explore how her company is transforming drug translation through hybrid AI.</p><p>With her unique blend of expertise as a veterinarian and computer scientist, Jo leverages biology, chemistry, and machine learning knowledge to tackle the translational gap between animal models and human patients. You’ll learn about VeriSIM Life’s innovative approach to overcoming data limitations, synthesizing new data, and applying ML models tailored to various diseases, from rare conditions to neurological disorders. Jo also reveals VeriSIM’s unique translational index score, a tool that predicts clinical trial success rates and helps pharma companies identify promising drugs early and avoid costly failures.</p><p>For anyone curious about the future of AI in healthcare, this episode offers a fascinating glimpse into the world of biotech innovation. 
To discover how VeriSIM Life’s technology is poised to bring life-saving treatments to patients faster and more safely than ever before, be sure to tune in today!</p><p><br></p><p><strong>Key Points:</strong></p><ul><li>How Jo's background and interest in translational challenges led her to found VeriSIM Life.</li><li>Addressing translational gaps between animal models and human trials with hybrid AI.</li><li>Combining biology-based models with ML to enhance drug testing accuracy.</li><li>Small molecules, peptides, large molecules, clinical trial outcomes, and other data inputs.</li><li>Ways that VeriSIM’s models are tailored per data type, ensuring maximum accuracy.</li><li>Insight into the challenge of overcoming data gaps and how VeriSIM solves it.</li><li>How hybrid AI reduces overfitting, boosting model accuracy in data-limited scenarios.</li><li>What goes into validating VeriSIM’s models through partnerships and external testing.</li><li>Measuring the impact of this technology with VeriSIM’s translational index score.</li><li>Jo’s advice for AI-powered startups: be specific, validate technology, and be adaptable.</li><li>Her predictions for the impact VeriSIM will have in the next few years.</li></ul><p><br></p><p><strong>Quotes:</strong></p><p>“[Hybrid AI] helps us not only unravel newer methods and mechanisms of actions or novel targets but also helps us identify better drug candidates that could eventually be safer and more effective in human patients.” — Jo Varshney</p><p><br></p><p>“Biology is complex. 
We need to understand it enough to create a codified version of that biology.” — Jo Varshney</p><p><br></p><p>“If you're just using machine learning-based methods, you may not get the right features to see the accuracy that you would see with the hybrid AI approach that we take.” — Jo Varshney</p><p><br></p><p>“Focus on validation and showing some real-world outcomes [rather than] just building the marketing outcome because, ultimately, we want it to get to the patients. We want to know if the technology really works. If it doesn't work, you can still pivot.” — Jo Varshney</p><p><br></p><p><strong>Links:</strong></p><p><a href="https://www.verisimlife.com/">VeriSIM Life</a></p><p><a href="https://www.linkedin.com/in/jyotika-jo-varshney">Jo Varshney on LinkedIn</a></p><p><a href="https://x.com/jo_verisim">Jo Varshney on X</a></p><p><br><strong>Resources for Computer Vision Teams:</strong></p><p><a href="https://www.linkedin.com/in/hdcouture/">LinkedIn</a> – Connect with Heather.</p><p><a href="https://pixelscientia.com/newsletter/">Computer Vision Insights Newsletter</a> – A biweekly newsletter to help bring the latest machine learning and computer vision research to applications in people and planetary health.</p><p><a href="http://pixelscientia.com/services/strategy-session/">Computer Vision Strategy Session</a> – Not sure how to advance your computer vision project? Get unstuck with a clear set of next steps. Schedule a 1 hour strategy session now to advance your project.</p>]]>
      </itunes:summary>
      <itunes:keywords>machine learning, drug discovery, translational medicine</itunes:keywords>
      <itunes:explicit>No</itunes:explicit>
      <podcast:transcript url="https://share.transistor.fm/s/0c82319e/transcript.txt" type="text/plain"/>
    </item>
    <item>
      <title>Decoding the Immune System for Drug Discovery with Noam Solomon from Immunai</title>
      <itunes:episode>107</itunes:episode>
      <podcast:episode>107</podcast:episode>
      <itunes:title>Decoding the Immune System for Drug Discovery with Noam Solomon from Immunai</itunes:title>
      <itunes:episodeType>full</itunes:episodeType>
      <guid isPermaLink="false">9c1c5b6c-2a7a-4263-b192-f5cdc461136a</guid>
      <link>https://pixelscientia.com/podcast/decoding-the-immune-system-for-drug-discovery-with-noam-solomon-from-immunai/</link>
      <description>
        <![CDATA[<p>Today’s guest believes that decoding the immune system is at the heart of improving drug efficacy. He is currently focused on this effort as the CEO and Co-founder of Immunai – a company that is building an AI model of the immune system to facilitate the development of next-generation immunomodulatory therapeutics. Noam Solomon begins our conversation by detailing his professional history and how it led to Immunai before explaining what Immunai does and why this work is vital for healthcare. Then, we discover how understanding the immune system will help to improve how drugs work in our bodies, how the team at Immunai accomplishes its goals, the major challenges of working with complex ML models, and some helpful recommendations for processing the high-dimensional nature of biological data. Noam also explains the collaborative landscape of Immunai, how the evolution of technology made his work possible, Immunai’s plans for the future, and his advice to others on a similar career path. </p><p><br></p><p><strong>Key Points:</strong></p><ul><li>Unpacking Noam Solomon’s professional journey that led to his founding of Immunai. </li><li>What Immunai does and why this work is vital for the healthcare industry. </li><li>How understanding the immune system will help to improve drug efficacy. </li><li>Exploring how Noam and his team use AI to accomplish their goals. </li><li>The standardization of data and other challenges of working with complex ML models. </li><li>Techniques for handling the high-dimensional nature of biological data.</li><li>How ML experts collaborate with other domains to inform and build Immunai’s models. </li><li>The technical advancements that have made Noam’s work possible. </li><li>His advice to other leaders of AI-powered startups, and imagining the future of Immunai. </li><li>How to connect with Noam and his work.  
</li></ul><p><br></p><p><strong>Quotes:</strong></p><p>“First, let’s talk about the problem, which is today, getting a drug from IND approval to FDA approval—which is the process of doing clinical trials—has less than a 10% chance of success, usually about a 5% chance, takes more than 10 years, and more than $2 billion of open immune therapy.” — Noam Solomon</p><p><br></p><p>“Different people respond differently to the same drug, and the reason they respond differently is because their immune system is different.” — Noam Solomon</p><p><br></p><p>“You first need to fall in love with the problems. Many ML people—physicists, mathematicians, computer scientists—we love building models; we love solving puzzles. In biology, you need to really fall in love with the question you are trying to answer.” — Noam Solomon</p><p><br></p><p>“It’s a great decade for biology.” — Noam Solomon</p><p><br></p><p><strong>Links:</strong></p><p><a href="https://www.linkedin.com/in/noam-solomon-142393129">Noam Solomon on LinkedIn</a></p><p><a href="https://x.com/noamsolomon1">Noam Solomon on X</a></p><p><a href="https://www.immunai.com/">Immunai</a></p><p><br><strong>Resources for Computer Vision Teams:</strong></p><p><a href="https://www.linkedin.com/in/hdcouture/">LinkedIn</a> – Connect with Heather.</p><p><a href="https://pixelscientia.com/newsletter/">Computer Vision Insights Newsletter</a> – A biweekly newsletter to help bring the latest machine learning and computer vision research to applications in people and planetary health.</p><p><a href="http://pixelscientia.com/services/strategy-session/">Computer Vision Strategy Session</a> – Not sure how to advance your computer vision project? Get unstuck with a clear set of next steps. Schedule a 1 hour strategy session now to advance your project.</p>]]>
      </description>
      <content:encoded>
        <![CDATA[<p>Today’s guest believes that decoding the immune system is at the heart of improving drug efficacy. He is currently focused on this effort as the CEO and Co-founder of Immunai – a company that is building an AI model of the immune system to facilitate the development of next-generation immunomodulatory therapeutics. Noam Solomon begins our conversation by detailing his professional history and how it led to Immunai before explaining what Immunai does and why this work is vital for healthcare. Then, we discover how understanding the immune system will help to improve how drugs work in our bodies, how the team at Immunai accomplishes its goals, the major challenges of working with complex ML models, and some helpful recommendations for processing the high-dimensional nature of biological data. Noam also explains the collaborative landscape of Immunai, how the evolution of technology made his work possible, Immunai’s plans for the future, and his advice to others on a similar career path. </p><p><br></p><p><strong>Key Points:</strong></p><ul><li>Unpacking Noam Solomon’s professional journey that led to his founding of Immunai. </li><li>What Immunai does and why this work is vital for the healthcare industry. </li><li>How understanding the immune system will help to improve drug efficacy. </li><li>Exploring how Noam and his team use AI to accomplish their goals. </li><li>The standardization of data and other challenges of working with complex ML models. </li><li>Techniques for handling the high-dimensional nature of biological data.</li><li>How ML experts collaborate with other domains to inform and build Immunai’s models. </li><li>The technical advancements that have made Noam’s work possible. </li><li>His advice to other leaders of AI-powered startups, and imagining the future of Immunai. </li><li>How to connect with Noam and his work.  
</li></ul><p><br></p><p><strong>Quotes:</strong></p><p>“First, let’s talk about the problem, which is today, getting a drug from IND approval to FDA approval—which is the process of doing clinical trials—has less than a 10% chance of success, usually about a 5% chance, takes more than 10 years, and more than $2 billion of open immune therapy.” — Noam Solomon</p><p><br></p><p>“Different people respond differently to the same drug, and the reason they respond differently is because their immune system is different.” — Noam Solomon</p><p><br></p><p>“You first need to fall in love with the problems. Many ML people—physicists, mathematicians, computer scientists—we love building models; we love solving puzzles. In biology, you need to really fall in love with the question you are trying to answer.” — Noam Solomon</p><p><br></p><p>“It’s a great decade for biology.” — Noam Solomon</p><p><br></p><p><strong>Links:</strong></p><p><a href="https://www.linkedin.com/in/noam-solomon-142393129">Noam Solomon on LinkedIn</a></p><p><a href="https://x.com/noamsolomon1">Noam Solomon on X</a></p><p><a href="https://www.immunai.com/">Immunai</a></p><p><br><strong>Resources for Computer Vision Teams:</strong></p><p><a href="https://www.linkedin.com/in/hdcouture/">LinkedIn</a> – Connect with Heather.</p><p><a href="https://pixelscientia.com/newsletter/">Computer Vision Insights Newsletter</a> – A biweekly newsletter to help bring the latest machine learning and computer vision research to applications in people and planetary health.</p><p><a href="http://pixelscientia.com/services/strategy-session/">Computer Vision Strategy Session</a> – Not sure how to advance your computer vision project? Get unstuck with a clear set of next steps. Schedule a 1 hour strategy session now to advance your project.</p>]]>
      </content:encoded>
      <pubDate>Mon, 04 Nov 2024 06:00:00 -0500</pubDate>
      <author>heather@pixelscientia.com (Heather D. Couture)</author>
      <enclosure url="https://2.gum.fm/op3.dev/e/pdcn.co/e/pdst.fm/e/dts.podtrac.com/redirect.mp3/media.transistor.fm/67a32cce/8d43b4b8.mp3" length="26013571" type="audio/mpeg"/>
      <itunes:author>Heather D. Couture</itunes:author>
      <itunes:image href="https://img.transistor.fm/eo_7N_JGoi9oUsHHmJQISvhn7nP_8sSuelQh9Whr-j8/rs:fill:0:0:1/w:1400/h:1400/q:60/mb:500000/aHR0cHM6Ly9pbWct/dXBsb2FkLXByb2R1/Y3Rpb24udHJhbnNp/c3Rvci5mbS8xZDkz/MjJlNmQ1MTRjNGNh/YzFhYjBlNDliNTEx/YzU3Yi5qcGVn.jpg"/>
      <itunes:duration>1082</itunes:duration>
      <itunes:summary>
        <![CDATA[<p>Today’s guest believes that decoding the immune system is at the heart of improving drug efficacy. He is currently focused on this effort as the CEO and Co-founder of Immunai – a company that is building an AI model of the immune system to facilitate the development of next-generation immunomodulatory therapeutics. Noam Solomon begins our conversation by detailing his professional history and how it led to Immunai before explaining what Immunai does and why this work is vital for healthcare. Then, we discover how understanding the immune system will help to improve how drugs work in our bodies, how the team at Immunai accomplishes its goals, the major challenges of working with complex ML models, and some helpful recommendations for processing the high-dimensional nature of biological data. Noam also explains the collaborative landscape of Immunai, how the evolution of technology made his work possible, Immunai’s plans for the future, and his advice to others on a similar career path. </p><p><br></p><p><strong>Key Points:</strong></p><ul><li>Unpacking Noam Solomon’s professional journey that led to his founding of Immunai. </li><li>What Immunai does and why this work is vital for the healthcare industry. </li><li>How understanding the immune system will help to improve drug efficacy. </li><li>Exploring how Noam and his team use AI to accomplish their goals. </li><li>The standardization of data and other challenges of working with complex ML models. </li><li>Techniques for handling the high-dimensional nature of biological data.</li><li>How ML experts collaborate with other domains to inform and build Immunai’s models. </li><li>The technical advancements that have made Noam’s work possible. </li><li>His advice to other leaders of AI-powered startups, and imagining the future of Immunai. </li><li>How to connect with Noam and his work.  
</li></ul><p><br></p><p><strong>Quotes:</strong></p><p>“First, let’s talk about the problem, which is today, getting a drug from IND approval to FDA approval—which is the process of doing clinical trials—has less than a 10% chance of success, usually about a 5% chance, takes more than 10 years, and more than $2 billion of open immune therapy.” — Noam Solomon</p><p><br></p><p>“Different people respond differently to the same drug, and the reason they respond differently is because their immune system is different.” — Noam Solomon</p><p><br></p><p>“You first need to fall in love with the problems. Many ML people—physicists, mathematicians, computer scientists—we love building models; we love solving puzzles. In biology, you need to really fall in love with the question you are trying to answer.” — Noam Solomon</p><p><br></p><p>“It’s a great decade for biology.” — Noam Solomon</p><p><br></p><p><strong>Links:</strong></p><p><a href="https://www.linkedin.com/in/noam-solomon-142393129">Noam Solomon on LinkedIn</a></p><p><a href="https://x.com/noamsolomon1">Noam Solomon on X</a></p><p><a href="https://www.immunai.com/">Immunai</a></p><p><br><strong>Resources for Computer Vision Teams:</strong></p><p><a href="https://www.linkedin.com/in/hdcouture/">LinkedIn</a> – Connect with Heather.</p><p><a href="https://pixelscientia.com/newsletter/">Computer Vision Insights Newsletter</a> – A biweekly newsletter to help bring the latest machine learning and computer vision research to applications in people and planetary health.</p><p><a href="http://pixelscientia.com/services/strategy-session/">Computer Vision Strategy Session</a> – Not sure how to advance your computer vision project? Get unstuck with a clear set of next steps. Schedule a 1 hour strategy session now to advance your project.</p>]]>
      </itunes:summary>
      <itunes:keywords>machine learning, drug discovery, immunology, healthcare</itunes:keywords>
      <itunes:explicit>No</itunes:explicit>
      <podcast:transcript url="https://share.transistor.fm/s/67a32cce/transcript.txt" type="text/plain"/>
    </item>
    <item>
      <title>Foundation Model Series: Accelerating Radiology with Robert Bakos from HOPPR</title>
      <itunes:episode>106</itunes:episode>
      <podcast:episode>106</podcast:episode>
      <itunes:title>Foundation Model Series: Accelerating Radiology with Robert Bakos from HOPPR</itunes:title>
      <itunes:episodeType>full</itunes:episodeType>
      <guid isPermaLink="false">f612bd8f-b592-48e1-bd06-0908e929ba02</guid>
      <link>https://pixelscientia.com/podcast/accelerating-radiology-with-robert-bakos-from-hoppr/</link>
      <description>
        <![CDATA[<p>Imagine a world where radiology backlogs are a thing of the past, and AI seamlessly augments the expertise of radiologists. Today, I'm joined by Robert Bakos, Co-Founder and CTO of HOPPR, to discuss how his company is bringing this vision to life. HOPPR is pioneering foundation models for medical imaging that have the potential to transform healthcare. With access to over 15 million diverse imaging studies, HOPPR is developing multimodal AI models that tackle radiology’s most significant challenges: high imaging volumes, limited specialist availability, and the growing demand for rapid, accurate diagnostics.</p><p>In this episode, Robert offers insight into the rigorous process of training these models on complex data while ensuring they integrate seamlessly into medical workflows. From data partnerships to specialized clinical collaboration, HOPPR’s approach sets new standards in healthcare AI. To discover how foundation models like these are revolutionizing radiology and making healthcare more efficient, accessible, and equitable, be sure to tune in today!</p><p><br></p><p><strong>Key Points:</strong></p><ul><li>Robert’s background in medical imaging and tech and how it led him to create HOPPR.</li><li>Ways that HOPPR’s AI models improve diagnostic speed and accuracy.</li><li>The significant data and compute resources required to build a foundation model like this.</li><li>Partnering with imaging organizations to collect diverse data across multiple modalities.</li><li>How HOPPR differentiates itself with ISO-compliant development and multimodal training.</li><li>The quantitative metrics and clinical review involved in validating its foundation model.</li><li>Key challenges in building this model include data access, diversity, and secure handling.</li><li>Reasons that proper data diversity and balance are essential to reduce model bias.</li><li>How API integration makes HOPPR’s models easy to adopt into existing workflows.</li><li>The 
real-world clinical needs and input that go into building an AI product roadmap.</li><li>Robert’s take on what the future of foundation models for medical imaging looks like.</li><li>Valuable lessons on the importance of strong labeling, compute scalability, and more.</li><li>Practical, real-world advice for other leaders of AI-powered startups.</li><li>The broader impact in healthcare that HOPPR aims to make.</li></ul><p><br></p><p><strong>Quotes:</strong></p><p>“Having clinical collaboration is super important. At HOPPR, our clinicians are an important part of our product development team – They're absolutely vital for helping us evaluate the performance of the model.” — Robert Bakos</p><p><br></p><p>“Because we are training across all these different modalities, getting access to this data can be challenging. Having great partnerships is critical for finding success in this space.” — Robert Bakos </p><p><br></p><p>“Make sure that you're addressing real problems. There are a lot of great ideas and cool things you can implement with AI, but at the end of the day, you want to make sure you can deliver value to your customers.” — Robert Bakos</p><p><br></p><p>“Foundation models – trained on a breadth of data – can make a positive impact on underserved areas around the world. 
With the volume of images growing so rapidly, constraints on radiologists, and burnout, it's important to leverage these models to make a big impact.” — Robert Bakos</p><p><br></p><p><strong>Links:</strong></p><p><a href="https://www.hoppr.ai/robert-bakos">Robert Bakos</a></p><p><a href="https://www.hoppr.ai/">HOPPR</a></p><p><a href="https://www.linkedin.com/in/robert-bakos/">Robert Bakos on LinkedIn</a></p><p><br><strong>Resources for Computer Vision Teams:</strong></p><p><a href="https://www.linkedin.com/in/hdcouture/">LinkedIn</a> – Connect with Heather.</p><p><a href="https://pixelscientia.com/newsletter/">Computer Vision Insights Newsletter</a> – A biweekly newsletter to help bring the latest machine learning and computer vision research to applications in people and planetary health.</p><p><a href="http://pixelscientia.com/services/strategy-session/">Computer Vision Strategy Session</a> – Not sure how to advance your computer vision project? Get unstuck with a clear set of next steps. Schedule a 1 hour strategy session now to advance your project.</p>]]>
      </description>
      <content:encoded>
        <![CDATA[<p>Imagine a world where radiology backlogs are a thing of the past, and AI seamlessly augments the expertise of radiologists. Today, I'm joined by Robert Bakos, Co-Founder and CTO of HOPPR, to discuss how his company is bringing this vision to life. HOPPR is pioneering foundation models for medical imaging that have the potential to transform healthcare. With access to over 15 million diverse imaging studies, HOPPR is developing multimodal AI models that tackle radiology’s most significant challenges: high imaging volumes, limited specialist availability, and the growing demand for rapid, accurate diagnostics.</p><p>In this episode, Robert offers insight into the rigorous process of training these models on complex data while ensuring they integrate seamlessly into medical workflows. From data partnerships to specialized clinical collaboration, HOPPR’s approach sets new standards in healthcare AI. To discover how foundation models like these are revolutionizing radiology and making healthcare more efficient, accessible, and equitable, be sure to tune in today!</p><p><br></p><p><strong>Key Points:</strong></p><ul><li>Robert’s background in medical imaging and tech and how it led him to create HOPPR.</li><li>Ways that HOPPR’s AI models improve diagnostic speed and accuracy.</li><li>The significant data and compute resources required to build a foundation model like this.</li><li>Partnering with imaging organizations to collect diverse data across multiple modalities.</li><li>How HOPPR differentiates itself with ISO-compliant development and multimodal training.</li><li>The quantitative metrics and clinical review involved in validating its foundation model.</li><li>Key challenges in building this model include data access, diversity, and secure handling.</li><li>Reasons that proper data diversity and balance are essential to reduce model bias.</li><li>How API integration makes HOPPR’s models easy to adopt into existing workflows.</li><li>The 
real-world clinical needs and input that go into building an AI product roadmap.</li><li>Robert’s take on what the future of foundation models for medical imaging looks like.</li><li>Valuable lessons on the importance of strong labeling, compute scalability, and more.</li><li>Practical, real-world advice for other leaders of AI-powered startups.</li><li>The broader impact in healthcare that HOPPR aims to make.</li></ul><p><br></p><p><strong>Quotes:</strong></p><p>“Having clinical collaboration is super important. At HOPPR, our clinicians are an important part of our product development team – They're absolutely vital for helping us evaluate the performance of the model.” — Robert Bakos</p><p><br></p><p>“Because we are training across all these different modalities, getting access to this data can be challenging. Having great partnerships is critical for finding success in this space.” — Robert Bakos </p><p><br></p><p>“Make sure that you're addressing real problems. There are a lot of great ideas and cool things you can implement with AI, but at the end of the day, you want to make sure you can deliver value to your customers.” — Robert Bakos</p><p><br></p><p>“Foundation models – trained on a breadth of data – can make a positive impact on underserved areas around the world. 
With the volume of images growing so rapidly, constraints on radiologists, and burnout, it's important to leverage these models to make a big impact.” — Robert Bakos</p><p><br></p><p><strong>Links:</strong></p><p><a href="https://www.hoppr.ai/robert-bakos">Robert Bakos</a></p><p><a href="https://www.hoppr.ai/">HOPPR</a></p><p><a href="https://www.linkedin.com/in/robert-bakos/">Robert Bakos on LinkedIn</a></p><p><br><strong>Resources for Computer Vision Teams:</strong></p><p><a href="https://www.linkedin.com/in/hdcouture/">LinkedIn</a> – Connect with Heather.</p><p><a href="https://pixelscientia.com/newsletter/">Computer Vision Insights Newsletter</a> – A biweekly newsletter to help bring the latest machine learning and computer vision research to applications in people and planetary health.</p><p><a href="http://pixelscientia.com/services/strategy-session/">Computer Vision Strategy Session</a> – Not sure how to advance your computer vision project? Get unstuck with a clear set of next steps. Schedule a 1 hour strategy session now to advance your project.</p>]]>
      </content:encoded>
      <pubDate>Mon, 28 Oct 2024 06:00:00 -0400</pubDate>
      <author>Heather D. Couture</author>
      <enclosure url="https://2.gum.fm/op3.dev/e/pdcn.co/e/pdst.fm/e/dts.podtrac.com/redirect.mp3/media.transistor.fm/3c0ce110/d47b871e.mp3" length="27655893" type="audio/mpeg"/>
      <itunes:author>Heather D. Couture</itunes:author>
      <itunes:image href="https://img.transistor.fm/v1V548sSmaagH12dwUYaM5ticioumXm1gmohlcRQX1Q/rs:fill:0:0:1/w:1400/h:1400/q:60/mb:500000/aHR0cHM6Ly9pbWct/dXBsb2FkLXByb2R1/Y3Rpb24udHJhbnNp/c3Rvci5mbS80MTUx/MWRkYmJiODI2M2U1/MDRlM2E2OWJkMTky/MTU2Yi5qcGVn.jpg"/>
      <itunes:duration>1722</itunes:duration>
      <itunes:summary>
        <![CDATA[<p>Imagine a world where radiology backlogs are a thing of the past, and AI seamlessly augments the expertise of radiologists. Today, I'm joined by Robert Bakos, Co-Founder and CTO of HOPPR, to discuss how his company is bringing this vision to life. HOPPR is pioneering foundation models for medical imaging that have the potential to transform healthcare. With access to over 15 million diverse imaging studies, HOPPR is developing multimodal AI models that tackle radiology’s most significant challenges: high imaging volumes, limited specialist availability, and the growing demand for rapid, accurate diagnostics.</p><p>In this episode, Robert offers insight into the rigorous process of training these models on complex data while ensuring they integrate seamlessly into medical workflows. From data partnerships to specialized clinical collaboration, HOPPR’s approach sets new standards in healthcare AI. To discover how foundation models like these are revolutionizing radiology and making healthcare more efficient, accessible, and equitable, be sure to tune in today!</p><p><br></p><p><strong>Key Points:</strong></p><ul><li>Robert’s background in medical imaging and tech and how it led him to create HOPPR.</li><li>Ways that HOPPR’s AI models improve diagnostic speed and accuracy.</li><li>The significant data and compute resources required to build a foundation model like this.</li><li>Partnering with imaging organizations to collect diverse data across multiple modalities.</li><li>How HOPPR differentiates itself with ISO-compliant development and multimodal training.</li><li>The quantitative metrics and clinical review involved in validating its foundation model.</li><li>Key challenges in building this model include data access, diversity, and secure handling.</li><li>Reasons that proper data diversity and balance are essential to reduce model bias.</li><li>How API integration makes HOPPR’s models easy to adopt into existing workflows.</li><li>The 
real-world clinical needs and input that go into building an AI product roadmap.</li><li>Robert’s take on what the future of foundation models for medical imaging looks like.</li><li>Valuable lessons on the importance of strong labeling, compute scalability, and more.</li><li>Practical, real-world advice for other leaders of AI-powered startups.</li><li>The broader impact in healthcare that HOPPR aims to make.</li></ul><p><br></p><p><strong>Quotes:</strong></p><p>“Having clinical collaboration is super important. At HOPPR, our clinicians are an important part of our product development team – They're absolutely vital for helping us evaluate the performance of the model.” — Robert Bakos</p><p><br></p><p>“Because we are training across all these different modalities, getting access to this data can be challenging. Having great partnerships is critical for finding success in this space.” — Robert Bakos </p><p><br></p><p>“Make sure that you're addressing real problems. There are a lot of great ideas and cool things you can implement with AI, but at the end of the day, you want to make sure you can deliver value to your customers.” — Robert Bakos</p><p><br></p><p>“Foundation models – trained on a breadth of data – can make a positive impact on underserved areas around the world. 
With the volume of images growing so rapidly, constraints on radiologists, and burnout, it's important to leverage these models to make a big impact.” — Robert Bakos</p><p><br></p><p><strong>Links:</strong></p><p><a href="https://www.hoppr.ai/robert-bakos">Robert Bakos</a></p><p><a href="https://www.hoppr.ai/">HOPPR</a></p><p><a href="https://www.linkedin.com/in/robert-bakos/">Robert Bakos on LinkedIn</a></p><p><br><strong>Resources for Computer Vision Teams:</strong></p><p><a href="https://www.linkedin.com/in/hdcouture/">LinkedIn</a> – Connect with Heather.</p><p><a href="https://pixelscientia.com/newsletter/">Computer Vision Insights Newsletter</a> – A biweekly newsletter to help bring the latest machine learning and computer vision research to applications in people and planetary health.</p><p><a href="http://pixelscientia.com/services/strategy-session/">Computer Vision Strategy Session</a> – Not sure how to advance your computer vision project? Get unstuck with a clear set of next steps. Schedule a 1 hour strategy session now to advance your project.</p>]]>
      </itunes:summary>
      <itunes:keywords>machine learning, radiology, foundation model</itunes:keywords>
      <itunes:explicit>No</itunes:explicit>
      <podcast:transcript url="https://share.transistor.fm/s/3c0ce110/transcript.txt" type="text/plain"/>
    </item>
    <item>
      <title>Optimizing Data Center Operations with Vedavyas Panneershelvam from Phaidra</title>
      <itunes:episode>105</itunes:episode>
      <podcast:episode>105</podcast:episode>
      <itunes:title>Optimizing Data Center Operations with Vedavyas Panneershelvam from Phaidra</itunes:title>
      <itunes:episodeType>full</itunes:episodeType>
      <guid isPermaLink="false">cfa90f6e-2048-452b-8300-7854230e074a</guid>
      <link>https://pixelscientia.com/podcast/optimizing-data-center-operations-with-vedavyas-panneershelvam-from-phaidra/</link>
      <description>
        <![CDATA[<p>What are the unique challenges of operating mission-critical facilities, and how can reinforcement learning be applied to optimize data center operations? In this episode, I sit down with Vedavyas Panneershelvam, CTO and co-founder of Phaidra, to discuss how their cutting-edge AI technology is transforming the efficiency and reliability of data centers. Phaidra is an AI company that specializes in providing intelligent control systems for industrial facilities to optimize performance and efficiency. Vedavyas is a technology entrepreneur with a strong background in artificial intelligence and its applications in industrial and operational settings. In our conversation, we discuss how Phaidra’s closed-loop, self-learning autonomous control system optimizes cooling for data centers and why reinforcement learning is the key to creating intelligent systems that learn and adapt over time. Vedavyas also explains the intricacies of working with operational data, the importance of understanding the physics behind machine learning models, and the long-term impact of Phaidra’s technology on energy efficiency and sustainability. Join us as we explore how AI can solve complex problems in industry and learn how Phaidra is paving the way for the future of autonomous control with Vedavyas Panneershelvam.</p><p><br></p><p><strong>Key Points:</strong></p><ul><li>Hear how collaborating on data center optimization at Google led to the founding of Phaidra.</li><li>How Phaidra’s AI-based autonomous control system optimizes data centers in real-time.</li><li>Discover how reinforcement learning is leveraged to improve data center operations.</li><li>Explore the range of data needed to continuously optimize the performance of data centers.</li><li>The challenges of using real-world data and the advantages of redundant data sources. 
</li><li>He explains how Phaidra ensures its models remain accurate even as conditions change.</li><li>Uncover Phaidra’s approach to validation and incorporating scalability across facilities. </li><li>Vedavyas shares why he thinks this type of technology is valuable and needed.</li><li>Recommendations for leaders of AI-powered startups and the future impact of Phaidra.</li></ul><p><br></p><p><strong>Quotes:</strong></p><p>“Phaidra is like a closed-loop self-learning autonomous control system that learns from its own experience.” — Vedavyas Panneershelvam</p><p><br></p><p>“Data centers basically generate so much heat, and they need to be cooled, and that takes a lot of energy, and also, the constraints in that use case are very, very narrow and tight.” — Vedavyas Panneershelvam</p><p><br></p><p>“The trick [to validation] is finding the right balance between relying on the physics and then how much do you trust the data.” — Vedavyas Panneershelvam</p><p><br></p><p>“[Large Language Models] have done a favor for us in helping the common public understand the potential of these, of machine learning in general.” — Vedavyas Panneershelvam</p><p><br></p><p><strong>Links:</strong></p><p><a href="https://www.linkedin.com/in/vedavyas-panneershelvam-22080214/">Vedavyas Panneershelvam on LinkedIn</a></p><p><a href="https://www.phaidra.ai/">Phaidra</a></p><p><br><strong>Resources for Computer Vision Teams:</strong></p><p><a href="https://www.linkedin.com/in/hdcouture/">LinkedIn</a> – Connect with Heather.</p><p><a href="https://pixelscientia.com/newsletter/">Computer Vision Insights Newsletter</a> – A biweekly newsletter to help bring the latest machine learning and computer vision research to applications in people and planetary health.</p><p><a href="http://pixelscientia.com/services/strategy-session/">Computer Vision Strategy Session</a> – Not sure how to advance your computer vision project? Get unstuck with a clear set of next steps. 
Schedule a 1 hour strategy session now to advance your project.</p>]]>
      </description>
      <content:encoded>
        <![CDATA[<p>What are the unique challenges of operating mission-critical facilities, and how can reinforcement learning be applied to optimize data center operations? In this episode, I sit down with Vedavyas Panneershelvam, CTO and co-founder of Phaidra, to discuss how their cutting-edge AI technology is transforming the efficiency and reliability of data centers. Phaidra is an AI company that specializes in providing intelligent control systems for industrial facilities to optimize performance and efficiency. Vedavyas is a technology entrepreneur with a strong background in artificial intelligence and its applications in industrial and operational settings. In our conversation, we discuss how Phaidra’s closed-loop, self-learning autonomous control system optimizes cooling for data centers and why reinforcement learning is the key to creating intelligent systems that learn and adapt over time. Vedavyas also explains the intricacies of working with operational data, the importance of understanding the physics behind machine learning models, and the long-term impact of Phaidra’s technology on energy efficiency and sustainability. Join us as we explore how AI can solve complex problems in industry and learn how Phaidra is paving the way for the future of autonomous control with Vedavyas Panneershelvam.</p><p><br></p><p><strong>Key Points:</strong></p><ul><li>Hear how collaborating on data center optimization at Google led to the founding of Phaidra.</li><li>How Phaidra’s AI-based autonomous control system optimizes data centers in real-time.</li><li>Discover how reinforcement learning is leveraged to improve data center operations.</li><li>Explore the range of data needed to continuously optimize the performance of data centers.</li><li>The challenges of using real-world data and the advantages of redundant data sources. 
</li><li>He explains how Phaidra ensures its models remain accurate even as conditions change.</li><li>Uncover Phaidra’s approach to validation and incorporating scalability across facilities. </li><li>Vedavyas shares why he thinks this type of technology is valuable and needed.</li><li>Recommendations for leaders of AI-powered startups and the future impact of Phaidra.</li></ul><p><br></p><p><strong>Quotes:</strong></p><p>“Phaidra is like a closed-loop self-learning autonomous control system that learns from its own experience.” — Vedavyas Panneershelvam</p><p><br></p><p>“Data centers basically generate so much heat, and they need to be cooled, and that takes a lot of energy, and also, the constraints in that use case are very, very narrow and tight.” — Vedavyas Panneershelvam</p><p><br></p><p>“The trick [to validation] is finding the right balance between relying on the physics and then how much do you trust the data.” — Vedavyas Panneershelvam</p><p><br></p><p>“[Large Language Models] have done a favor for us in helping the common public understand the potential of these, of machine learning in general.” — Vedavyas Panneershelvam</p><p><br></p><p><strong>Links:</strong></p><p><a href="https://www.linkedin.com/in/vedavyas-panneershelvam-22080214/">Vedavyas Panneershelvam on LinkedIn</a></p><p><a href="https://www.phaidra.ai/">Phaidra</a></p><p><br><strong>Resources for Computer Vision Teams:</strong></p><p><a href="https://www.linkedin.com/in/hdcouture/">LinkedIn</a> – Connect with Heather.</p><p><a href="https://pixelscientia.com/newsletter/">Computer Vision Insights Newsletter</a> – A biweekly newsletter to help bring the latest machine learning and computer vision research to applications in people and planetary health.</p><p><a href="http://pixelscientia.com/services/strategy-session/">Computer Vision Strategy Session</a> – Not sure how to advance your computer vision project? Get unstuck with a clear set of next steps. 
Schedule a 1 hour strategy session now to advance your project.</p>]]>
      </content:encoded>
      <pubDate>Mon, 21 Oct 2024 06:00:00 -0400</pubDate>
      <author>Heather D. Couture</author>
      <enclosure url="https://2.gum.fm/op3.dev/e/pdcn.co/e/pdst.fm/e/dts.podtrac.com/redirect.mp3/media.transistor.fm/0de90819/a129bdac.mp3" length="32046266" type="audio/mpeg"/>
      <itunes:author>Heather D. Couture</itunes:author>
      <itunes:image href="https://img.transistor.fm/6_XI0xMPw04_ls12cJwKF8vEQTCMfooviYS3l7jHoGs/rs:fill:0:0:1/w:1400/h:1400/q:60/mb:500000/aHR0cHM6Ly9pbWct/dXBsb2FkLXByb2R1/Y3Rpb24udHJhbnNp/c3Rvci5mbS9jNzZi/MjMwZDJkNzM3NGRi/NTA3YzhjYTc0YTUx/OTY5NS5qcGVn.jpg"/>
      <itunes:duration>1334</itunes:duration>
      <itunes:summary>
        <![CDATA[<p>What are the unique challenges of operating mission-critical facilities, and how can reinforcement learning be applied to optimize data center operations? In this episode, I sit down with Vedavyas Panneershelvam, CTO and co-founder of Phaidra, to discuss how their cutting-edge AI technology is transforming the efficiency and reliability of data centers. Phaidra is an AI company that specializes in providing intelligent control systems for industrial facilities to optimize performance and efficiency. Vedavyas is a technology entrepreneur with a strong background in artificial intelligence and its applications in industrial and operational settings. In our conversation, we discuss how Phaidra’s closed-loop, self-learning autonomous control system optimizes cooling for data centers and why reinforcement learning is the key to creating intelligent systems that learn and adapt over time. Vedavyas also explains the intricacies of working with operational data, the importance of understanding the physics behind machine learning models, and the long-term impact of Phaidra’s technology on energy efficiency and sustainability. Join us as we explore how AI can solve complex problems in industry and learn how Phaidra is paving the way for the future of autonomous control with Vedavyas Panneershelvam.</p><p><br></p><p><strong>Key Points:</strong></p><ul><li>Hear how collaborating on data center optimization at Google led to the founding of Phaidra.</li><li>How Phaidra’s AI-based autonomous control system optimizes data centers in real-time.</li><li>Discover how reinforcement learning is leveraged to improve data center operations.</li><li>Explore the range of data needed to continuously optimize the performance of data centers.</li><li>The challenges of using real-world data and the advantages of redundant data sources. 
</li><li>He explains how Phaidra ensures its models remain accurate even as conditions change.</li><li>Uncover Phaidra’s approach to validation and incorporating scalability across facilities. </li><li>Vedavyas shares why he thinks this type of technology is valuable and needed.</li><li>Recommendations for leaders of AI-powered startups and the future impact of Phaidra.</li></ul><p><br></p><p><strong>Quotes:</strong></p><p>“Phaidra is like a closed-loop self-learning autonomous control system that learns from its own experience.” — Vedavyas Panneershelvam</p><p><br></p><p>“Data centers basically generate so much heat, and they need to be cooled, and that takes a lot of energy, and also, the constraints in that use case are very, very narrow and tight.” — Vedavyas Panneershelvam</p><p><br></p><p>“The trick [to validation] is finding the right balance between relying on the physics and then how much do you trust the data.” — Vedavyas Panneershelvam</p><p><br></p><p>“[Large Language Models] have done a favor for us in helping the common public understand the potential of these, of machine learning in general.” — Vedavyas Panneershelvam</p><p><br></p><p><strong>Links:</strong></p><p><a href="https://www.linkedin.com/in/vedavyas-panneershelvam-22080214/">Vedavyas Panneershelvam on LinkedIn</a></p><p><a href="https://www.phaidra.ai/">Phaidra</a></p><p><br><strong>Resources for Computer Vision Teams:</strong></p><p><a href="https://www.linkedin.com/in/hdcouture/">LinkedIn</a> – Connect with Heather.</p><p><a href="https://pixelscientia.com/newsletter/">Computer Vision Insights Newsletter</a> – A biweekly newsletter to help bring the latest machine learning and computer vision research to applications in people and planetary health.</p><p><a href="http://pixelscientia.com/services/strategy-session/">Computer Vision Strategy Session</a> – Not sure how to advance your computer vision project? Get unstuck with a clear set of next steps. 
Schedule a 1 hour strategy session now to advance your project.</p>]]>
      </itunes:summary>
      <itunes:keywords>machine learning, reinforcement learning, optimization, data centers</itunes:keywords>
      <itunes:explicit>No</itunes:explicit>
      <podcast:transcript url="https://share.transistor.fm/s/0de90819/transcript.txt" type="text/plain"/>
    </item>
    <item>
      <title>Structuring Medical Text with Tim O'Connell from Emtelligent</title>
      <itunes:episode>104</itunes:episode>
      <podcast:episode>104</podcast:episode>
      <itunes:title>Structuring Medical Text with Tim O'Connell from Emtelligent</itunes:title>
      <itunes:episodeType>full</itunes:episodeType>
      <guid isPermaLink="false">9b04faab-3d80-48b8-a3f7-d4a1165275ed</guid>
      <link>https://pixelscientia.com/podcast/structuring-medical-text-with-tim-oconnell-from-emtelligent/</link>
      <description>
        <![CDATA[<p>What if AI could unlock the potential of healthcare’s vast, unstructured data? In this episode, Tim O'Connell, Co-Founder and CEO of Emtelligent, explains how his company is bridging the gap between messy medical data and usable insights with AI-powered solutions. Drawing from his background in both engineering and radiology, Tim discusses how he saw firsthand the inefficiencies caused by disorganized medical notes and reports, which led to the creation of Emtelligent. He breaks down how their AI models work to process and structure this data, making it usable for healthcare professionals, researchers, and beyond. Tim also dives into the technical challenges, from handling faxed medical records to ensuring high levels of precision and recall in model training. Beyond the technology, he emphasizes the importance of safety, ethical use, and how Emtelligent continues to adapt its AI to meet the evolving needs of the healthcare industry, helping to make patient care more efficient and accurate. 
Don’t miss out on this important conversation with Tim O’Connell from Emtelligent!</p><p><br></p><p><strong>Key Points:</strong></p><ul><li>An overview of Tim’s background in engineering and radiology.</li><li>How Tim co-founded Emtelligent to solve pressing data issues in healthcare.</li><li>The importance of turning unstructured medical text into searchable, structured data.</li><li>How Emtelligent’s models extract metadata and structure from faxed patient records.</li><li>Why healthcare data is so challenging to work with, from shorthand to messy notes.</li><li>The role of precision and recall in assessing and improving model performance in healthcare.</li><li>Ensuring AI models continue to perform well after deployment with ongoing updates.</li><li>How Tim’s team maintains safety and ethical standards in AI healthcare solutions.</li><li>Creating technology that serves the end user; how it is informed by firsthand experience.</li><li>The importance of clinical input to develop relevant and practical AI healthcare tools.</li><li>Where Tim sees AI's impact in healthcare evolving over the next three to five years.</li></ul><p><br></p><p><strong>Quotes:</strong></p><p>“During that year [that I was] working in the hospital, – I saw so many problems that we have in the healthcare environment and realized that quite a few of them had to do with the fact [that] we deal with so much unstructured data.” — Tim O’Connell</p><p><br></p><p>“Every time a human goes to see a caregiver, some kind of an unstructured text note is generated – We really can't use a lot of that data, unless it's another human who's reading that data.” — Tim O’Connell</p><p><br></p><p>“I’m still a practicing radiologist. – It’s not just a matter of intelligent people coming up with good ideas and going, ‘Oh, well. [Let’s throw this] against the wall and see what sticks’. 
We're developing solutions that are applicable in today's healthcare environment.” — Tim O’Connell</p><p><br></p><p><strong>Links:</strong></p><p><a href="https://www.linkedin.com/in/tim-o-connell-3b5064122/?originalSubdomain=ca">Tim O’Connell on LinkedIn</a></p><p><a href="https://emtelligent.com/">Emtelligent</a></p><p><br><strong>Resources for Computer Vision Teams:</strong></p><p><a href="https://www.linkedin.com/in/hdcouture/">LinkedIn</a> – Connect with Heather.</p><p><a href="https://pixelscientia.com/newsletter/">Computer Vision Insights Newsletter</a> – A biweekly newsletter to help bring the latest machine learning and computer vision research to applications in people and planetary health.</p><p><a href="http://pixelscientia.com/services/strategy-session/">Computer Vision Strategy Session</a> – Not sure how to advance your computer vision project? Get unstuck with a clear set of next steps. Schedule a 1 hour strategy session now to advance your project.</p>]]>
      </description>
      <content:encoded>
        <![CDATA[<p>What if AI could unlock the potential of healthcare’s vast, unstructured data? In this episode, Tim O'Connell, Co-Founder and CEO of Emtelligent, explains how his company is bridging the gap between messy medical data and usable insights with AI-powered solutions. Drawing from his background in both engineering and radiology, Tim discusses how he saw firsthand the inefficiencies caused by disorganized medical notes and reports, which led to the creation of Emtelligent. He breaks down how their AI models work to process and structure this data, making it usable for healthcare professionals, researchers, and beyond. Tim also dives into the technical challenges, from handling faxed medical records to ensuring high levels of precision and recall in model training. Beyond the technology, he emphasizes the importance of safety, ethical use, and how Emtelligent continues to adapt its AI to meet the evolving needs of the healthcare industry, helping to make patient care more efficient and accurate. 
Don’t miss out on this important conversation with Tim O’Connell from Emtelligent!</p><p><br></p><p><strong>Key Points:</strong></p><ul><li>An overview of Tim’s background in engineering and radiology.</li><li>How Tim co-founded Emtelligent to solve pressing data issues in healthcare.</li><li>The importance of turning unstructured medical text into searchable, structured data.</li><li>How Emtelligent’s models extract metadata and structure from faxed patient records.</li><li>Why healthcare data is so challenging to work with, from shorthand to messy notes.</li><li>The role of precision and recall in assessing and improving model performance in healthcare.</li><li>Ensuring AI models continue to perform well after deployment with ongoing updates.</li><li>How Tim’s team maintains safety and ethical standards in AI healthcare solutions.</li><li>Creating technology that serves the end user; how it is informed by firsthand experience.</li><li>The importance of clinical input to develop relevant and practical AI healthcare tools.</li><li>Where Tim sees AI's impact in healthcare evolving over the next three to five years.</li></ul><p><br></p><p><strong>Quotes:</strong></p><p>“During that year [that I was] working in the hospital, – I saw so many problems that we have in the healthcare environment and realized that quite a few of them had to do with the fact [that] we deal with so much unstructured data.” — Tim O’Connell</p><p><br></p><p>“Every time a human goes to see a caregiver, some kind of an unstructured text note is generated – We really can't use a lot of that data, unless it's another human who's reading that data.” — Tim O’Connell</p><p><br></p><p>“I’m still a practicing radiologist. – It’s not just a matter of intelligent people coming up with good ideas and going, ‘Oh, well. [Let’s throw this] against the wall and see what sticks’. 
We're developing solutions that are applicable in today's healthcare environment.” — Tim O’Connell</p><p><br></p><p><strong>Links:</strong></p><p><a href="https://www.linkedin.com/in/tim-o-connell-3b5064122/?originalSubdomain=ca">Tim O’Connell on LinkedIn</a></p><p><a href="https://emtelligent.com/">Emtelligent</a></p><p><br><strong>Resources for Computer Vision Teams:</strong></p><p><a href="https://www.linkedin.com/in/hdcouture/">LinkedIn</a> – Connect with Heather.</p><p><a href="https://pixelscientia.com/newsletter/">Computer Vision Insights Newsletter</a> – A biweekly newsletter to help bring the latest machine learning and computer vision research to applications in people and planetary health.</p><p><a href="http://pixelscientia.com/services/strategy-session/">Computer Vision Strategy Session</a> – Not sure how to advance your computer vision project? Get unstuck with a clear set of next steps. Schedule a 1 hour strategy session now to advance your project.</p>]]>
      </content:encoded>
      <pubDate>Mon, 14 Oct 2024 06:00:00 -0400</pubDate>
      <author>Heather D. Couture</author>
      <enclosure url="https://2.gum.fm/op3.dev/e/pdcn.co/e/pdst.fm/e/dts.podtrac.com/redirect.mp3/media.transistor.fm/35180be5/799b1481.mp3" length="17413941" type="audio/mpeg"/>
      <itunes:author>Heather D. Couture</itunes:author>
      <itunes:image href="https://img.transistor.fm/o2TlIs3rOf9aZPsTId99j-6Qj1JE5YVv3pYU1P_7Wt4/rs:fill:0:0:1/w:1400/h:1400/q:60/mb:500000/aHR0cHM6Ly9pbWct/dXBsb2FkLXByb2R1/Y3Rpb24udHJhbnNp/c3Rvci5mbS9mNzg5/OTBlZDhlZTZiNTY1/MGQwYWQyMmNjYmY4/ODQ5Yi5qcGVn.jpg"/>
      <itunes:duration>1082</itunes:duration>
      <itunes:summary>
        <![CDATA[<p>What if AI could unlock the potential of healthcare’s vast, unstructured data? In this episode, Tim O'Connell, Co-Founder and CEO of Emtelligent, explains how his company is bridging the gap between messy medical data and usable insights with AI-powered solutions. Drawing from his background in both engineering and radiology, Tim discusses how he saw firsthand the inefficiencies caused by disorganized medical notes and reports, which led to the creation of Emtelligent. He breaks down how their AI models work to process and structure this data, making it usable for healthcare professionals, researchers, and beyond. Tim also dives into the technical challenges, from handling faxed medical records to ensuring high levels of precision and recall in model training. Beyond the technology, he emphasizes the importance of safety, ethical use, and how Emtelligent continues to adapt its AI to meet the evolving needs of the healthcare industry, helping to make patient care more efficient and accurate. 
Don’t miss out on this important conversation with Tim O’Connell from Emtelligent!</p><p><br></p><p><strong>Key Points:</strong></p><ul><li>An overview of Tim’s background in engineering and radiology.</li><li>How Tim co-founded Emtelligent to solve pressing data issues in healthcare.</li><li>The importance of turning unstructured medical text into searchable, structured data.</li><li>How Emtelligent’s models extract metadata and structure from faxed patient records.</li><li>Why healthcare data is so challenging to work with, from shorthand to messy notes.</li><li>The role of precision and recall in assessing and improving model performance in healthcare.</li><li>Ensuring AI models continue to perform well after deployment with ongoing updates.</li><li>How Tim’s team maintains safety and ethical standards in AI healthcare solutions.</li><li>Creating technology that serves the end user; how it is informed by firsthand experience.</li><li>The importance of clinical input to develop relevant and practical AI healthcare tools.</li><li>Where Tim sees AI's impact in healthcare evolving over the next three to five years.</li></ul><p><br></p><p><strong>Quotes:</strong></p><p>“During that year [that I was] working in the hospital, – I saw so many problems that we have in the healthcare environment and realized that quite a few of them had to do with the fact [that] we deal with so much unstructured data.” — Tim O’Connell</p><p><br></p><p>“Every time a human goes to see a caregiver, some kind of an unstructured text note is generated – We really can't use a lot of that data, unless it's another human who's reading that data.” — Tim O’Connell</p><p><br></p><p>“I’m still a practicing radiologist. – It’s not just a matter of intelligent people coming up with good ideas and going, ‘Oh, well. [Let’s throw this] against the wall and see what sticks’. 
We're developing solutions that are applicable in today's healthcare environment.” — Tim O’Connell</p><p><br></p><p><strong>Links:</strong></p><p><a href="https://www.linkedin.com/in/tim-o-connell-3b5064122/?originalSubdomain=ca">Tim O’Connell on LinkedIn</a></p><p><a href="https://emtelligent.com/">Emtelligent</a></p><p><br><strong>Resources for Computer Vision Teams:</strong></p><p><a href="https://www.linkedin.com/in/hdcouture/">LinkedIn</a> – Connect with Heather.</p><p><a href="https://pixelscientia.com/newsletter/">Computer Vision Insights Newsletter</a> – A biweekly newsletter to help bring the latest machine learning and computer vision research to applications in people and planetary health.</p><p><a href="http://pixelscientia.com/services/strategy-session/">Computer Vision Strategy Session</a> – Not sure how to advance your computer vision project? Get unstuck with a clear set of next steps. Schedule a 1 hour strategy session now to advance your project.</p>]]>
      </itunes:summary>
      <itunes:keywords>machine learning, natural language processing, healthcare</itunes:keywords>
      <itunes:explicit>false</itunes:explicit>
      <podcast:transcript url="https://share.transistor.fm/s/35180be5/transcript.txt" type="text/plain"/>
    </item>
    <item>
      <title>Foundation Model Series: Enabling Digital Pathology Workflows with Dmitry Nechaev from HistAI</title>
      <itunes:episode>103</itunes:episode>
      <podcast:episode>103</podcast:episode>
      <itunes:title>Foundation Model Series: Enabling Digital Pathology Workflows with Dmitry Nechaev from HistAI</itunes:title>
      <itunes:episodeType>full</itunes:episodeType>
      <guid isPermaLink="false">43616633-27b2-4cca-9e0d-e2064737f6b9</guid>
      <link>https://pixelscientia.com/podcast/enabling-digital-pathology-workflows-with-dmitry-nechaev-from-histai/</link>
      <description>
        <![CDATA[<p>What happens when you combine AI with digital pathology? In this episode, Dmitry Nechaev, Chief AI Scientist and co-founder of HistAI, joins me to discuss the complexity of building foundation models specifically for digital pathology. Dmitry has a strong background in machine learning and experience in high-resolution image analysis. At HistAI, he leads the development of cutting-edge AI models tailored for pathology.</p><p>HistAI, a digital pathology company, focuses on developing AI-driven solutions that assist pathologists in analyzing complex tissue samples faster and more accurately. In our conversation, we unpack the development and application of foundation models for digital pathology. Dmitry explains why conventional models trained on natural images often struggle with pathology data and how HistAI’s models address this gap. Learn about the technical challenges of training these models and the steps for managing massive datasets, selecting the correct training methods, and optimizing for high-speed performance. Join me and explore how AI is transforming digital pathology workflows with Dmitry Nechaev!</p><p><br></p><p><strong>Key Points:</strong></p><ul><li>Background about Dmitry, his path to HistAI, and his role at the company.</li><li>What whole slide images are and the challenges of working with them.</li><li>How AI can streamline diagnostics and reduce the workload for pathologists.</li><li>Why foundation models are a core component of HistAI’s technology. </li><li>The scale of data and compute power required to build foundation models.</li><li>Outline of the different approaches to building a foundation model.</li><li>Privacy aspects of building models based on medical data.</li><li>Challenges Dmitry has faced developing HistAI’s foundation model. </li><li>Hear what makes HistAI’s foundation model different from other models.</li><li>Learn about his approach to benchmarking and improving a model. 
</li><li>Explore how foundation models are leveraged in HistAI’s technology. </li><li>The future of foundation models and his lessons from developing them.</li><li>Final takeaways and how to access HistAI’s open-source models.</li></ul><p><br></p><p><strong>Quotes:</strong></p><p>“Regular foundation models are trained on natural images and I'd say they are not good at generalizing to pathological data.” — Dmitry Nechaev</p><p><br></p><p>“In short, [a foundational model] requires a lot of data and a lot of [compute power].” — Dmitry Nechaev</p><p>“Public benchmarks [are] a really good thing.” — Dmitry Nechaev</p><p><br></p><p>“Our foundation models are fully open-source. We don't really try to sell them. In a sense, they are kind of useless by themselves, since you need to train something on top of them, so we don't try to profit from these models.” — Dmitry Nechaev</p><p><br></p><p>“The best lesson is that you need quality data to get a quality model.” — Dmitry Nechaev</p><p><br></p><p>“[HistAI] don't want AI technologies to be a privilege of the richest countries. 
We want that to be available around the world.” — Dmitry Nechaev</p><p><br></p><p><strong>Links:</strong></p><p><a href="https://www.linkedin.com/in/mgvz/">Dmitry Nechaev on LinkedIn</a></p><p><a href="https://github.com/megavaz">Dmitry Nechaev on GitHub</a></p><p><a href="https://www.hist.ai/">HistAI</a></p><p><a href="https://www.hist.ai/blog/histai-launch">CELLDX</a></p><p><a href="https://huggingface.co/histai/hibou-b">Hibou on Hugging Face</a></p><p><br><strong>Resources for Computer Vision Teams:</strong></p><p><a href="https://www.linkedin.com/in/hdcouture/">LinkedIn</a> – Connect with Heather.</p><p><a href="https://pixelscientia.com/newsletter/">Computer Vision Insights Newsletter</a> – A biweekly newsletter to help bring the latest machine learning and computer vision research to applications in people and planetary health.</p><p><a href="http://pixelscientia.com/services/strategy-session/">Computer Vision Strategy Session</a> – Not sure how to advance your computer vision project? Get unstuck with a clear set of next steps. Schedule a 1 hour strategy session now to advance your project.</p>]]>
      </description>
      <content:encoded>
        <![CDATA[<p>What happens when you combine AI with digital pathology? In this episode, Dmitry Nechaev, Chief AI Scientist and co-founder of HistAI, joins me to discuss the complexity of building foundation models specifically for digital pathology. Dmitry has a strong background in machine learning and experience in high-resolution image analysis. At HistAI, he leads the development of cutting-edge AI models tailored for pathology.</p><p>HistAI, a digital pathology company, focuses on developing AI-driven solutions that assist pathologists in analyzing complex tissue samples faster and more accurately. In our conversation, we unpack the development and application of foundation models for digital pathology. Dmitry explains why conventional models trained on natural images often struggle with pathology data and how HistAI’s models address this gap. Learn about the technical challenges of training these models and the steps for managing massive datasets, selecting the correct training methods, and optimizing for high-speed performance. Join me and explore how AI is transforming digital pathology workflows with Dmitry Nechaev!</p><p><br></p><p><strong>Key Points:</strong></p><ul><li>Background about Dmitry, his path to HistAI, and his role at the company.</li><li>What whole slide images are and the challenges of working with them.</li><li>How AI can streamline diagnostics and reduce the workload for pathologists.</li><li>Why foundation models are a core component of HistAI’s technology. </li><li>The scale of data and compute power required to build foundation models.</li><li>Outline of the different approaches to building a foundation model.</li><li>Privacy aspects of building models based on medical data.</li><li>Challenges Dmitry has faced developing HistAI’s foundation model. </li><li>Hear what makes HistAI’s foundation model different from other models.</li><li>Learn about his approach to benchmarking and improving a model. 
</li><li>Explore how foundation models are leveraged in HistAI’s technology. </li><li>The future of foundation models and his lessons from developing them.</li><li>Final takeaways and how to access HistAI’s open-source models.</li></ul><p><br></p><p><strong>Quotes:</strong></p><p>“Regular foundation models are trained on natural images and I'd say they are not good at generalizing to pathological data.” — Dmitry Nechaev</p><p><br></p><p>“In short, [a foundational model] requires a lot of data and a lot of [compute power].” — Dmitry Nechaev</p><p>“Public benchmarks [are] a really good thing.” — Dmitry Nechaev</p><p><br></p><p>“Our foundation models are fully open-source. We don't really try to sell them. In a sense, they are kind of useless by themselves, since you need to train something on top of them, so we don't try to profit from these models.” — Dmitry Nechaev</p><p><br></p><p>“The best lesson is that you need quality data to get a quality model.” — Dmitry Nechaev</p><p><br></p><p>“[HistAI] don't want AI technologies to be a privilege of the richest countries. 
We want that to be available around the world.” — Dmitry Nechaev</p><p><br></p><p><strong>Links:</strong></p><p><a href="https://www.linkedin.com/in/mgvz/">Dmitry Nechaev on LinkedIn</a></p><p><a href="https://github.com/megavaz">Dmitry Nechaev on GitHub</a></p><p><a href="https://www.hist.ai/">HistAI</a></p><p><a href="https://www.hist.ai/blog/histai-launch">CELLDX</a></p><p><a href="https://huggingface.co/histai/hibou-b">Hibou on Hugging Face</a></p><p><br><strong>Resources for Computer Vision Teams:</strong></p><p><a href="https://www.linkedin.com/in/hdcouture/">LinkedIn</a> – Connect with Heather.</p><p><a href="https://pixelscientia.com/newsletter/">Computer Vision Insights Newsletter</a> – A biweekly newsletter to help bring the latest machine learning and computer vision research to applications in people and planetary health.</p><p><a href="http://pixelscientia.com/services/strategy-session/">Computer Vision Strategy Session</a> – Not sure how to advance your computer vision project? Get unstuck with a clear set of next steps. Schedule a 1 hour strategy session now to advance your project.</p>]]>
      </content:encoded>
      <pubDate>Mon, 07 Oct 2024 06:00:00 -0400</pubDate>
      <author>Heather D. Couture</author>
      <enclosure url="https://2.gum.fm/op3.dev/e/pdcn.co/e/pdst.fm/e/dts.podtrac.com/redirect.mp3/media.transistor.fm/6475c91a/b82231cc.mp3" length="42558496" type="audio/mpeg"/>
      <itunes:author>Heather D. Couture</itunes:author>
      <itunes:image href="https://img.transistor.fm/azGxTgnT0nRr-ecmMGc8L-0n4-vj5o959qn-8izq9mc/rs:fill:0:0:1/w:1400/h:1400/q:60/mb:500000/aHR0cHM6Ly9pbWct/dXBsb2FkLXByb2R1/Y3Rpb24udHJhbnNp/c3Rvci5mbS81NWE1/N2U2OGVmMDE4ZTNm/YmE5NDdkNTc4YWRi/Y2IxMS5wbmc.jpg"/>
      <itunes:duration>1772</itunes:duration>
      <itunes:summary>
        <![CDATA[<p>What happens when you combine AI with digital pathology? In this episode, Dmitry Nechaev, Chief AI Scientist and co-founder of HistAI, joins me to discuss the complexity of building foundation models specifically for digital pathology. Dmitry has a strong background in machine learning and experience in high-resolution image analysis. At HistAI, he leads the development of cutting-edge AI models tailored for pathology.</p><p>HistAI, a digital pathology company, focuses on developing AI-driven solutions that assist pathologists in analyzing complex tissue samples faster and more accurately. In our conversation, we unpack the development and application of foundation models for digital pathology. Dmitry explains why conventional models trained on natural images often struggle with pathology data and how HistAI’s models address this gap. Learn about the technical challenges of training these models and the steps for managing massive datasets, selecting the correct training methods, and optimizing for high-speed performance. Join me and explore how AI is transforming digital pathology workflows with Dmitry Nechaev!</p><p><br></p><p><strong>Key Points:</strong></p><ul><li>Background about Dmitry, his path to HistAI, and his role at the company.</li><li>What whole slide images are and the challenges of working with them.</li><li>How AI can streamline diagnostics and reduce the workload for pathologists.</li><li>Why foundation models are a core component of HistAI’s technology. </li><li>The scale of data and compute power required to build foundation models.</li><li>Outline of the different approaches to building a foundation model.</li><li>Privacy aspects of building models based on medical data.</li><li>Challenges Dmitry has faced developing HistAI’s foundation model. </li><li>Hear what makes HistAI’s foundation model different from other models.</li><li>Learn about his approach to benchmarking and improving a model. 
</li><li>Explore how foundation models are leveraged in HistAI’s technology. </li><li>The future of foundation models and his lessons from developing them.</li><li>Final takeaways and how to access HistAI’s open-source models.</li></ul><p><br></p><p><strong>Quotes:</strong></p><p>“Regular foundation models are trained on natural images and I'd say they are not good at generalizing to pathological data.” — Dmitry Nechaev</p><p><br></p><p>“In short, [a foundational model] requires a lot of data and a lot of [compute power].” — Dmitry Nechaev</p><p>“Public benchmarks [are] a really good thing.” — Dmitry Nechaev</p><p><br></p><p>“Our foundation models are fully open-source. We don't really try to sell them. In a sense, they are kind of useless by themselves, since you need to train something on top of them, so we don't try to profit from these models.” — Dmitry Nechaev</p><p><br></p><p>“The best lesson is that you need quality data to get a quality model.” — Dmitry Nechaev</p><p><br></p><p>“[HistAI] don't want AI technologies to be a privilege of the richest countries. 
We want that to be available around the world.” — Dmitry Nechaev</p><p><br></p><p><strong>Links:</strong></p><p><a href="https://www.linkedin.com/in/mgvz/">Dmitry Nechaev on LinkedIn</a></p><p><a href="https://github.com/megavaz">Dmitry Nechaev on GitHub</a></p><p><a href="https://www.hist.ai/">HistAI</a></p><p><a href="https://www.hist.ai/blog/histai-launch">CELLDX</a></p><p><a href="https://huggingface.co/histai/hibou-b">Hibou on Hugging Face</a></p><p><br><strong>Resources for Computer Vision Teams:</strong></p><p><a href="https://www.linkedin.com/in/hdcouture/">LinkedIn</a> – Connect with Heather.</p><p><a href="https://pixelscientia.com/newsletter/">Computer Vision Insights Newsletter</a> – A biweekly newsletter to help bring the latest machine learning and computer vision research to applications in people and planetary health.</p><p><a href="http://pixelscientia.com/services/strategy-session/">Computer Vision Strategy Session</a> – Not sure how to advance your computer vision project? Get unstuck with a clear set of next steps. Schedule a 1 hour strategy session now to advance your project.</p>]]>
      </itunes:summary>
      <itunes:keywords>machine learning, foundation models, histology, pathology</itunes:keywords>
      <itunes:explicit>false</itunes:explicit>
      <podcast:transcript url="https://share.transistor.fm/s/6475c91a/transcript.txt" type="text/plain"/>
    </item>
    <item>
      <title>Foundation Model Series: Creating Small Molecules for Drug Discovery with Jason Rolfe from Variational AI</title>
      <itunes:episode>102</itunes:episode>
      <podcast:episode>102</podcast:episode>
      <itunes:title>Foundation Model Series: Creating Small Molecules for Drug Discovery with Jason Rolfe from Variational AI</itunes:title>
      <itunes:episodeType>full</itunes:episodeType>
      <guid isPermaLink="false">c8013fbf-9b0d-42f3-87ad-b5e54b433702</guid>
      <link>https://pixelscientia.com/podcast/creating-small-molecules-for-drug-discovery-with-jason-rolfe-from-variational-ai/</link>
      <description>
        <![CDATA[<p>Building on the trends in language processing, domain-specific foundation models are unlocking new possibilities. In the realm of drug discovery, Jason Rolfe is spearheading innovation at the intersection of AI and pharmaceuticals. As the Co-Founder and CTO of Variational AI, Jason leads a platform designed to generate novel small molecule structures that accelerate drug development. In this episode, he delves into how Variational AI uses foundation models to predict and optimize small molecules, overcoming the immense complexity of drug discovery by leveraging vast datasets and sophisticated computational techniques. He also addresses the key challenges of modeling molecular potency and why traditional machine-learning approaches often fall short. For anyone curious about AI's impact on healthcare, this conversation offers a fascinating look into cutting-edge innovations set to reshape the pharmaceutical industry. Tune in to find out how the types of breakthroughs we discuss in this episode could revolutionize drug development, bring new therapeutics to market across disease areas, and positively impact lives!</p><p><br></p><p><strong>Key Points:</strong></p><ul><li>An overview of Jason’s background and how it led him to create Variational AI.</li><li>What Variational AI does for the small molecule domain for drug discovery.</li><li>How they use foundation models to predict and enhance the design of small molecules.</li><li>Defining small molecules, their appeal, and an overview of Variational AI's data sets.</li><li>What goes into training Variational AI's foundation model.</li><li>The computational infrastructure and algorithms necessary to process this data.</li><li>Challenges of predicting molecular potency against disease-related protein targets.</li><li>Various ways that Variational AI’s foundation model underpins everything they do.</li><li>Evaluating progress: balancing predictive success with experimental validation.</li><li>Lessons 
from developing foundation models that could apply to other data types.</li><li>Jason’s funding and research-focused advice for leaders of AI-powered startups.</li><li>The transformative impact of Variational AI’s technology on drug development.</li></ul><p><br></p><p><strong>Quotes:</strong></p><p>“Rather than forming individual models for specific drug targets, we're creating a joint model over hundreds, eventually thousands of drug targets.” — Jason Rolfe</p><p><br></p><p>“Data quality is essential. In particular, if you're drawing from multiple different data sources, frequently, those sources aren't commensurable.” — Jason Rolfe</p><p><br></p><p>“If you don't have a proven track record where people are already throwing money at you, it is very challenging to try to bring a new technology from the drawing board into commercial application using venture funding.” — Jason Rolfe</p><p><br></p><p>“Whenever you're developing a new technology or product, you need to test early and often. Some of your intuitions will be good. 
Most of your intuitions will be a waste of time – The more quickly you can distinguish between those two classes, the more efficiently you can move toward success.” — Jason Rolfe</p><p><br></p><p><strong>Links:</strong></p><p><a href="https://variational.ai/">Variational AI</a></p><p><a href="https://variationalai.substack.com/">Variational AI Blog</a></p><p><a href="https://www.linkedin.com/in/jason-rolfe-323b231/">Jason Rolfe on LinkedIn</a></p><p><br><strong>Resources for Computer Vision Teams:</strong></p><p><a href="https://www.linkedin.com/in/hdcouture/">LinkedIn</a> – Connect with Heather.</p><p><a href="https://pixelscientia.com/newsletter/">Computer Vision Insights Newsletter</a> – A biweekly newsletter to help bring the latest machine learning and computer vision research to applications in people and planetary health.</p><p><a href="http://pixelscientia.com/services/strategy-session/">Computer Vision Strategy Session</a> – Not sure how to advance your computer vision project? Get unstuck with a clear set of next steps. Schedule a 1 hour strategy session now to advance your project.</p>]]>
      </description>
      <content:encoded>
        <![CDATA[<p>Building on the trends in language processing, domain-specific foundation models are unlocking new possibilities. In the realm of drug discovery, Jason Rolfe is spearheading innovation at the intersection of AI and pharmaceuticals. As the Co-Founder and CTO of Variational AI, Jason leads a platform designed to generate novel small molecule structures that accelerate drug development. In this episode, he delves into how Variational AI uses foundation models to predict and optimize small molecules, overcoming the immense complexity of drug discovery by leveraging vast datasets and sophisticated computational techniques. He also addresses the key challenges of modeling molecular potency and why traditional machine-learning approaches often fall short. For anyone curious about AI's impact on healthcare, this conversation offers a fascinating look into cutting-edge innovations set to reshape the pharmaceutical industry. Tune in to find out how the types of breakthroughs we discuss in this episode could revolutionize drug development, bring new therapeutics to market across disease areas, and positively impact lives!</p><p><br></p><p><strong>Key Points:</strong></p><ul><li>An overview of Jason’s background and how it led him to create Variational AI.</li><li>What Variational AI does for the small molecule domain for drug discovery.</li><li>How they use foundation models to predict and enhance the design of small molecules.</li><li>Defining small molecules, their appeal, and an overview of Variational AI's data sets.</li><li>What goes into training Variational AI's foundation model.</li><li>The computational infrastructure and algorithms necessary to process this data.</li><li>Challenges of predicting molecular potency against disease-related protein targets.</li><li>Various ways that Variational AI’s foundation model underpins everything they do.</li><li>Evaluating progress: balancing predictive success with experimental validation.</li><li>Lessons 
from developing foundation models that could apply to other data types.</li><li>Jason’s funding and research-focused advice for leaders of AI-powered startups.</li><li>The transformative impact of Variational AI’s technology on drug development.</li></ul><p><br></p><p><strong>Quotes:</strong></p><p>“Rather than forming individual models for specific drug targets, we're creating a joint model over hundreds, eventually thousands of drug targets.” — Jason Rolfe</p><p><br></p><p>“Data quality is essential. In particular, if you're drawing from multiple different data sources, frequently, those sources aren't commensurable.” — Jason Rolfe</p><p><br></p><p>“If you don't have a proven track record where people are already throwing money at you, it is very challenging to try to bring a new technology from the drawing board into commercial application using venture funding.” — Jason Rolfe</p><p><br></p><p>“Whenever you're developing a new technology or product, you need to test early and often. Some of your intuitions will be good. 
Most of your intuitions will be a waste of time – The more quickly you can distinguish between those two classes, the more efficiently you can move toward success.” — Jason Rolfe</p><p><br></p><p><strong>Links:</strong></p><p><a href="https://variational.ai/">Variational AI</a></p><p><a href="https://variationalai.substack.com/">Variational AI Blog</a></p><p><a href="https://www.linkedin.com/in/jason-rolfe-323b231/">Jason Rolfe on LinkedIn</a></p><p><br><strong>Resources for Computer Vision Teams:</strong></p><p><a href="https://www.linkedin.com/in/hdcouture/">LinkedIn</a> – Connect with Heather.</p><p><a href="https://pixelscientia.com/newsletter/">Computer Vision Insights Newsletter</a> – A biweekly newsletter to help bring the latest machine learning and computer vision research to applications in people and planetary health.</p><p><a href="http://pixelscientia.com/services/strategy-session/">Computer Vision Strategy Session</a> – Not sure how to advance your computer vision project? Get unstuck with a clear set of next steps. Schedule a 1 hour strategy session now to advance your project.</p>]]>
      </content:encoded>
      <pubDate>Mon, 30 Sep 2024 06:00:00 -0400</pubDate>
      <author>Heather D. Couture</author>
      <enclosure url="https://2.gum.fm/op3.dev/e/pdcn.co/e/pdst.fm/e/dts.podtrac.com/redirect.mp3/media.transistor.fm/b6b16a73/52c763ff.mp3" length="28414125" type="audio/mpeg"/>
      <itunes:author>Heather D. Couture</itunes:author>
      <itunes:image href="https://img.transistor.fm/WzALIdi_FpHQvC_yq7nYVXsO0dVA7SMnBJtXZyfDoHY/rs:fill:0:0:1/w:1400/h:1400/q:60/mb:500000/aHR0cHM6Ly9pbWct/dXBsb2FkLXByb2R1/Y3Rpb24udHJhbnNp/c3Rvci5mbS80Mzc5/ZmI2MTQyNThkYTFm/MjM3ZmE3Yjg1YjE4/MTM3My5wbmc.jpg"/>
      <itunes:duration>1769</itunes:duration>
      <itunes:summary>
        <![CDATA[<p>Building on the trends in language processing, domain-specific foundation models are unlocking new possibilities. In the realm of drug discovery, Jason Rolfe is spearheading innovation at the intersection of AI and pharmaceuticals. As the Co-Founder and CTO of Variational AI, Jason leads a platform designed to generate novel small molecule structures that accelerate drug development. In this episode, he delves into how Variational AI uses foundation models to predict and optimize small molecules, overcoming the immense complexity of drug discovery by leveraging vast datasets and sophisticated computational techniques. He also addresses the key challenges of modeling molecular potency and why traditional machine-learning approaches often fall short. For anyone curious about AI's impact on healthcare, this conversation offers a fascinating look into cutting-edge innovations set to reshape the pharmaceutical industry. Tune in to find out how the types of breakthroughs we discuss in this episode could revolutionize drug development, bring new therapeutics to market across disease areas, and positively impact lives!</p><p><br></p><p><strong>Key Points:</strong></p><ul><li>An overview of Jason’s background and how it led him to create Variational AI.</li><li>What Variational AI does for the small molecule domain for drug discovery.</li><li>How they use foundation models to predict and enhance the design of small molecules.</li><li>Defining small molecules, their appeal, and an overview of Variational AI's data sets.</li><li>What goes into training Variational AI's foundation model.</li><li>The computational infrastructure and algorithms necessary to process this data.</li><li>Challenges of predicting molecular potency against disease-related protein targets.</li><li>Various ways that Variational AI’s foundation model underpins everything they do.</li><li>Evaluating progress: balancing predictive success with experimental validation.</li><li>Lessons 
from developing foundation models that could apply to other data types.</li><li>Jason’s funding and research-focused advice for leaders of AI-powered startups.</li><li>The transformative impact of Variational AI’s technology on drug development.</li></ul><p><br></p><p><strong>Quotes:</strong></p><p>“Rather than forming individual models for specific drug targets, we're creating a joint model over hundreds, eventually thousands of drug targets.” — Jason Rolfe</p><p><br></p><p>“Data quality is essential. In particular, if you're drawing from multiple different data sources, frequently, those sources aren't commensurable.” — Jason Rolfe</p><p><br></p><p>“If you don't have a proven track record where people are already throwing money at you, it is very challenging to try to bring a new technology from the drawing board into commercial application using venture funding.” — Jason Rolfe</p><p><br></p><p>“Whenever you're developing a new technology or product, you need to test early and often. Some of your intuitions will be good. 
Most of your intuitions will be a waste of time – The more quickly you can distinguish between those two classes, the more efficiently you can move toward success.” — Jason Rolfe</p><p><br></p><p><strong>Links:</strong></p><p><a href="https://variational.ai/">Variational AI</a></p><p><a href="https://variationalai.substack.com/">Variational AI Blog</a></p><p><a href="https://www.linkedin.com/in/jason-rolfe-323b231/">Jason Rolfe on LinkedIn</a></p><p><br><strong>Resources for Computer Vision Teams:</strong></p><p><a href="https://www.linkedin.com/in/hdcouture/">LinkedIn</a> – Connect with Heather.</p><p><a href="https://pixelscientia.com/newsletter/">Computer Vision Insights Newsletter</a> – A biweekly newsletter to help bring the latest machine learning and computer vision research to applications in people and planetary health.</p><p><a href="http://pixelscientia.com/services/strategy-session/">Computer Vision Strategy Session</a> – Not sure how to advance your computer vision project? Get unstuck with a clear set of next steps. Schedule a 1 hour strategy session now to advance your project.</p>]]>
      </itunes:summary>
      <itunes:keywords>machine learning, drug discovery, small molecules, foundation model</itunes:keywords>
      <itunes:explicit>No</itunes:explicit>
      <podcast:transcript url="https://share.transistor.fm/s/b6b16a73/transcript.txt" type="text/plain"/>
    </item>
    <item>
      <title>Foundation Model Series: Building New Materials for Climate with Jonathan Godwin from Orbital Materials</title>
      <itunes:episode>101</itunes:episode>
      <podcast:episode>101</podcast:episode>
      <itunes:title>Foundation Model Series: Building New Materials for Climate with Jonathan Godwin from Orbital Materials</itunes:title>
      <itunes:episodeType>full</itunes:episodeType>
      <guid isPermaLink="false">ba739755-ae4d-49e1-b42c-c5223c6c4635</guid>
      <link>https://pixelscientia.com/podcast/building-news-materials-for-climate-with-jonathan-godwin-from-orbital-materials/</link>
      <description>
        <![CDATA[<p>AI is unlocking the future of materials science and today’s guest Jonathan Godwin, co-founder and CEO of Orbital Materials, is at the forefront of this transformation. With a background in AI research and experience leading groundbreaking projects at Google-owned DeepMind, Jonathan is now applying machine learning to develop advanced materials that can drive decarbonization.</p><p>In this episode, he explains how Orbital Materials is using foundation models (like ChatGPT for language or MidJourney for images) to design new materials that capture carbon, store energy, and improve industrial efficiency. He also shares insights into the company’s mission, the challenges of simulating atomic-level interactions, and why open-sourcing their model, Orb, is crucial for innovation.</p><p>To discover how AI is revolutionizing the fight against climate change and learn how these cutting-edge materials could shape a more sustainable future, don’t miss this inspiring conversation with Jonathan Godwin!</p><p><br></p><p><strong>Key Points:</strong></p><ul><li>Insight into Jonathan’s diverse career path and how it led him to Orbital Materials.</li><li>What types of advanced materials Orbital develops and their potential impact.</li><li>The critical role AI plays in developing materials for decarbonization purposes.</li><li>Defining foundation models and why they’re an essential part of leveraging AI.</li><li>3D atomic simulations and other types of data that go into Orbital’s foundation model.</li><li>The computing infrastructure required to build a foundation model for materials.</li><li>Engineering and other challenges encountered while building models at this scale. 
</li><li>How AI enhances scientific discovery without replacing human expertise.</li><li>Why open-sourcing Orbital’s foundation model, Orb, is key for innovation.</li><li>Lessons from developing this model that could be applied to other data types.</li><li>Jonathan’s detail-oriented advice for leaders of AI-powered startups.</li><li>Orbital’s exciting mission to accelerate new materials development.</li></ul><p><br></p><p><strong>Quotes:</strong></p><p>“We develop materials that can capture CO2 from specific gas streams – coming out of an industrial facility, new energy storage technologies that allow – [data centers] to operate behind the meter, or ways to improve the water efficiency of a data center or industrial facility.” — Jonathan Godwin</p><p><br></p><p>“Foundation models are the crux of how we're able to leverage AI in this day and age. If you want to [say], 'We're pushing the limits of what AI is able to do. We're leveraging the most recent breakthroughs,' – you've got to be building foundation models or using foundation models.” — Jonathan Godwin</p><p><br></p><p>“AI is a massively powerful creativity aid and accelerant. 
We’ve seen that in other areas of AI and we're bringing that to advanced materials.” — Jonathan Godwin</p><p><br></p><p><strong>Links:</strong></p><p><a href="https://www.orbitalmaterials.com/">Orbital Materials</a></p><p><a href="https://www.linkedin.com/company/orbitalmaterials/">Orbital Materials on LinkedIn</a></p><p><a href="https://x.com/OrbMaterials">Orbital Materials on X</a></p><p><a href="https://github.com/orbital-materials">Orbital Materials on GitHub</a></p><p><a href="https://www.linkedin.com/in/jonathan-godwin-12907638/">Jonathan Godwin on LinkedIn</a></p><p><a href="https://x.com/jgodwin_ai">Jonathan Godwin on X</a></p><p><a href="https://substack.com/@rootnodes">Jonathan Godwin Substack</a></p><p><br><strong>Resources for Computer Vision Teams:</strong></p><p><a href="https://www.linkedin.com/in/hdcouture/">LinkedIn</a> – Connect with Heather.</p><p><a href="https://pixelscientia.com/newsletter/">Computer Vision Insights Newsletter</a> – A biweekly newsletter to help bring the latest machine learning and computer vision research to applications in people and planetary health.</p><p><a href="http://pixelscientia.com/services/strategy-session/">Computer Vision Strategy Session</a> – Not sure how to advance your computer vision project? Get unstuck with a clear set of next steps. Schedule a 1 hour strategy session now to advance your project.</p>]]>
      </description>
      <content:encoded>
        <![CDATA[<p>AI is unlocking the future of materials science and today’s guest Jonathan Godwin, co-founder and CEO of Orbital Materials, is at the forefront of this transformation. With a background in AI research and experience leading groundbreaking projects at Google-owned DeepMind, Jonathan is now applying machine learning to develop advanced materials that can drive decarbonization.</p><p>In this episode, he explains how Orbital Materials is using foundation models (like ChatGPT for language or MidJourney for images) to design new materials that capture carbon, store energy, and improve industrial efficiency. He also shares insights into the company’s mission, the challenges of simulating atomic-level interactions, and why open-sourcing their model, Orb, is crucial for innovation.</p><p>To discover how AI is revolutionizing the fight against climate change and learn how these cutting-edge materials could shape a more sustainable future, don’t miss this inspiring conversation with Jonathan Godwin!</p><p><br></p><p><strong>Key Points:</strong></p><ul><li>Insight into Jonathan’s diverse career path and how it led him to Orbital Materials.</li><li>What types of advanced materials Orbital develops and their potential impact.</li><li>The critical role AI plays in developing materials for decarbonization purposes.</li><li>Defining foundation models and why they’re an essential part of leveraging AI.</li><li>3D atomic simulations and other types of data that go into Orbital’s foundation model.</li><li>The computing infrastructure required to build a foundation model for materials.</li><li>Engineering and other challenges encountered while building models at this scale. 
</li><li>How AI enhances scientific discovery without replacing human expertise.</li><li>Why open-sourcing Orbital’s foundation model, Orb, is key for innovation.</li><li>Lessons from developing this model that could be applied to other data types.</li><li>Jonathan’s detail-oriented advice for leaders of AI-powered startups.</li><li>Orbital’s exciting mission to accelerate new materials development.</li></ul><p><br></p><p><strong>Quotes:</strong></p><p>“We develop materials that can capture CO2 from specific gas streams – coming out of an industrial facility, new energy storage technologies that allow – [data centers] to operate behind the meter, or ways to improve the water efficiency of a data center or industrial facility.” — Jonathan Godwin</p><p><br></p><p>“Foundation models are the crux of how we're able to leverage AI in this day and age. If you want to [say], 'We're pushing the limits of what AI is able to do. We're leveraging the most recent breakthroughs,' – you've got to be building foundation models or using foundation models.” — Jonathan Godwin</p><p><br></p><p>“AI is a massively powerful creativity aid and accelerant. 
We’ve seen that in other areas of AI and we're bringing that to advanced materials.” — Jonathan Godwin</p><p><br></p><p><strong>Links:</strong></p><p><a href="https://www.orbitalmaterials.com/">Orbital Materials</a></p><p><a href="https://www.linkedin.com/company/orbitalmaterials/">Orbital Materials on LinkedIn</a></p><p><a href="https://x.com/OrbMaterials">Orbital Materials on X</a></p><p><a href="https://github.com/orbital-materials">Orbital Materials on GitHub</a></p><p><a href="https://www.linkedin.com/in/jonathan-godwin-12907638/">Jonathan Godwin on LinkedIn</a></p><p><a href="https://x.com/jgodwin_ai">Jonathan Godwin on X</a></p><p><a href="https://substack.com/@rootnodes">Jonathan Godwin Substack</a></p><p><br><strong>Resources for Computer Vision Teams:</strong></p><p><a href="https://www.linkedin.com/in/hdcouture/">LinkedIn</a> – Connect with Heather.</p><p><a href="https://pixelscientia.com/newsletter/">Computer Vision Insights Newsletter</a> – A biweekly newsletter to help bring the latest machine learning and computer vision research to applications in people and planetary health.</p><p><a href="http://pixelscientia.com/services/strategy-session/">Computer Vision Strategy Session</a> – Not sure how to advance your computer vision project? Get unstuck with a clear set of next steps. Schedule a 1 hour strategy session now to advance your project.</p>]]>
      </content:encoded>
      <pubDate>Mon, 23 Sep 2024 06:00:00 -0400</pubDate>
      <author>Heather D. Couture</author>
      <enclosure url="https://2.gum.fm/op3.dev/e/pdcn.co/e/pdst.fm/e/dts.podtrac.com/redirect.mp3/media.transistor.fm/02be6028/55622343.mp3" length="36138686" type="audio/mpeg"/>
      <itunes:author>Heather D. Couture</itunes:author>
      <itunes:image href="https://img.transistor.fm/6kWULl7cmfaeUWmLdoW1Aq8sSSWF8jw3ubWyQmcgDTA/rs:fill:0:0:1/w:1400/h:1400/q:60/mb:500000/aHR0cHM6Ly9pbWct/dXBsb2FkLXByb2R1/Y3Rpb24udHJhbnNp/c3Rvci5mbS81YTA1/ZjhhZGRhZjJiNjVi/MDNmNGVjMGU0NTBm/YTIyOS5qcGVn.jpg"/>
      <itunes:duration>1503</itunes:duration>
      <itunes:summary>
        <![CDATA[<p>AI is unlocking the future of materials science and today’s guest Jonathan Godwin, co-founder and CEO of Orbital Materials, is at the forefront of this transformation. With a background in AI research and experience leading groundbreaking projects at Google-owned DeepMind, Jonathan is now applying machine learning to develop advanced materials that can drive decarbonization.</p><p>In this episode, he explains how Orbital Materials is using foundation models (like ChatGPT for language or MidJourney for images) to design new materials that capture carbon, store energy, and improve industrial efficiency. He also shares insights into the company’s mission, the challenges of simulating atomic-level interactions, and why open-sourcing their model, Orb, is crucial for innovation.</p><p>To discover how AI is revolutionizing the fight against climate change and learn how these cutting-edge materials could shape a more sustainable future, don’t miss this inspiring conversation with Jonathan Godwin!</p><p><br></p><p><strong>Key Points:</strong></p><ul><li>Insight into Jonathan’s diverse career path and how it led him to Orbital Materials.</li><li>What types of advanced materials Orbital develops and their potential impact.</li><li>The critical role AI plays in developing materials for decarbonization purposes.</li><li>Defining foundation models and why they’re an essential part of leveraging AI.</li><li>3D atomic simulations and other types of data that go into Orbital’s foundation model.</li><li>The computing infrastructure required to build a foundation model for materials.</li><li>Engineering and other challenges encountered while building models at this scale. 
</li><li>How AI enhances scientific discovery without replacing human expertise.</li><li>Why open-sourcing Orbital’s foundation model, Orb, is key for innovation.</li><li>Lessons from developing this model that could be applied to other data types.</li><li>Jonathan’s detail-oriented advice for leaders of AI-powered startups.</li><li>Orbital’s exciting mission to accelerate new materials development.</li></ul><p><br></p><p><strong>Quotes:</strong></p><p>“We develop materials that can capture CO2 from specific gas streams – coming out of an industrial facility, new energy storage technologies that allow – [data centers] to operate behind the meter, or ways to improve the water efficiency of a data center or industrial facility.” — Jonathan Godwin</p><p><br></p><p>“Foundation models are the crux of how we're able to leverage AI in this day and age. If you want to [say], 'We're pushing the limits of what AI is able to do. We're leveraging the most recent breakthroughs,' – you've got to be building foundation models or using foundation models.” — Jonathan Godwin</p><p><br></p><p>“AI is a massively powerful creativity aid and accelerant. 
We’ve seen that in other areas of AI and we're bringing that to advanced materials.” — Jonathan Godwin</p><p><br></p><p><strong>Links:</strong></p><p><a href="https://www.orbitalmaterials.com/">Orbital Materials</a></p><p><a href="https://www.linkedin.com/company/orbitalmaterials/">Orbital Materials on LinkedIn</a></p><p><a href="https://x.com/OrbMaterials">Orbital Materials on X</a></p><p><a href="https://github.com/orbital-materials">Orbital Materials on GitHub</a></p><p><a href="https://www.linkedin.com/in/jonathan-godwin-12907638/">Jonathan Godwin on LinkedIn</a></p><p><a href="https://x.com/jgodwin_ai">Jonathan Godwin on X</a></p><p><a href="https://substack.com/@rootnodes">Jonathan Godwin Substack</a></p><p><br><strong>Resources for Computer Vision Teams:</strong></p><p><a href="https://www.linkedin.com/in/hdcouture/">LinkedIn</a> – Connect with Heather.</p><p><a href="https://pixelscientia.com/newsletter/">Computer Vision Insights Newsletter</a> – A biweekly newsletter to help bring the latest machine learning and computer vision research to applications in people and planetary health.</p><p><a href="http://pixelscientia.com/services/strategy-session/">Computer Vision Strategy Session</a> – Not sure how to advance your computer vision project? Get unstuck with a clear set of next steps. Schedule a 1 hour strategy session now to advance your project.</p>]]>
      </itunes:summary>
      <itunes:keywords>machine learning, foundation model, climate, materials science</itunes:keywords>
      <itunes:explicit>No</itunes:explicit>
      <podcast:transcript url="https://share.transistor.fm/s/02be6028/transcript.txt" type="text/plain"/>
    </item>
    <item>
      <title>Foundation Model Series: Understanding Brain Activity with Dimitris Sakellariou from Piramidal</title>
      <itunes:episode>100</itunes:episode>
      <podcast:episode>100</podcast:episode>
      <itunes:title>Foundation Model Series: Understanding Brain Activity with Dimitris Sakellariou from Piramidal</itunes:title>
      <itunes:episodeType>full</itunes:episodeType>
      <guid isPermaLink="false">09716714-2a6c-428f-8823-07a824d04591</guid>
      <link>https://pixelscientia.com/podcast/understanding-brain-activity-with-dimitris-sakellariou-from-piramidal/</link>
      <description>
        <![CDATA[<p>What if we could understand brain activity in real-time to better diagnose neurological conditions? In this episode, part of a special mini-series on domain-specific foundation models, I sit down with Dimitris Sakellariou, the founder and CEO of Piramidal, to talk about their groundbreaking work in automating EEG interpretation. Piramidal is focused on democratizing brain health insights, making interpreting brainwave data more accessible and accurate. With a strong foundation in neuroscience and AI, Dimitris and his team are developing models that could revolutionize how we understand brain activity and diagnose neurological conditions.</p><p>In our conversation, Dimitris explains the challenges of building a foundation model for brain activity, the role of data diversity, and the future potential for personalized brain health monitoring. Discover the implications of Piramidal’s technology beyond healthcare and its application in cognitive enhancement and stress management. 
Tune in as we explore how Piramidal is paving the way for personalized brain health monitoring and why this could be a game-changer for the future of medicine!</p><p><br></p><p><strong>Key Points:</strong></p><ul><li>Dimitris discusses his journey from physics to a career in neuroscience.</li><li>Explore Piramidal's mission to automate EEG interpretation.</li><li>Learn about the complexity and variability of brainwave patterns.</li><li>Hear how machine learning can better analyze brain activity.</li><li>Uncover the challenges of building a foundation model for EEG data.</li><li>Why diverse data sets are vital for training the foundational model.</li><li>Piramidal's plans for making EEG analysis more accessible.</li><li>Future use cases for Piramidal’s model in healthcare and beyond.</li><li>Discover why domain knowledge for model building is essential.</li><li>He shares advice for AI startup founders.</li></ul><p><br></p><p><strong>Quotes:</strong></p><p>“Piramidal is primarily focused at the moment in automating, or otherwise democratizing the interpretation of these tests, these brainwave recordings so that patients and people that have issues with their brain can get access to the diagnosis much, much, much faster.” — Dimitris Sakellariou</p><p>“It's very important to have discussions with neuroscientists and clinical experts in order to understand what is the end-to-end pipeline from receiving data all the way to inference.” — Dimitris Sakellariou</p><p><br></p><p>“Finding the right person. 
Someone that is very keen to build together with you and make important and difficult decisions can change massively a trajectory of your company.” — Dimitris Sakellariou</p><p><br></p><p><strong>Links:</strong></p><p><a href="https://www.linkedin.com/in/sacellarius/">Dimitris Sakellariou on LinkedIn</a></p><p><a href="https://x.com/dsacellarius">Dimitris Sakellariou on X</a></p><p><a href="https://piramidal.ai/">Piramidal</a></p><p><a href="https://www.linkedin.com/company/piramidalai/">Piramidal on LinkedIn</a></p><p><br><strong>Resources for Computer Vision Teams:</strong></p><p><a href="https://www.linkedin.com/in/hdcouture/">LinkedIn</a> – Connect with Heather.</p><p><a href="https://pixelscientia.com/newsletter/">Computer Vision Insights Newsletter</a> – A biweekly newsletter to help bring the latest machine learning and computer vision research to applications in people and planetary health.</p><p><a href="http://pixelscientia.com/services/strategy-session/">Computer Vision Strategy Session</a> – Not sure how to advance your computer vision project? Get unstuck with a clear set of next steps. Schedule a 1 hour strategy session now to advance your project.</p>]]>
      </description>
      <content:encoded>
        <![CDATA[<p>What if we could understand brain activity in real-time to better diagnose neurological conditions? In this episode, part of a special mini-series on domain-specific foundation models, I sit down with Dimitris Sakellariou, the founder and CEO of Piramidal, to talk about their groundbreaking work in automating EEG interpretation. Piramidal is focused on democratizing brain health insights, making interpreting brainwave data more accessible and accurate. With a strong foundation in neuroscience and AI, Dimitris and his team are developing models that could revolutionize how we understand brain activity and diagnose neurological conditions.</p><p>In our conversation, Dimitris explains the challenges of building a foundation model for brain activity, the role of data diversity, and the future potential for personalized brain health monitoring. Discover the implications of Piramidal’s technology beyond healthcare and its application in cognitive enhancement and stress management. 
Tune in as we explore how Piramidal is paving the way for personalized brain health monitoring and why this could be a game-changer for the future of medicine!</p><p><br></p><p><strong>Key Points:</strong></p><ul><li>Dimitris discusses his journey from physics to a career in neuroscience.</li><li>Explore Piramidal's mission to automate EEG interpretation.</li><li>Learn about the complexity and variability of brainwave patterns.</li><li>Hear how machine learning can better analyze brain activity.</li><li>Uncover the challenges of building a foundation model for EEG data.</li><li>Why diverse data sets are vital for training the foundational model.</li><li>Piramidal's plans for making EEG analysis more accessible.</li><li>Future use cases for Piramidal’s model in healthcare and beyond.</li><li>Discover why domain knowledge for model building is essential.</li><li>He shares advice for AI startup founders.</li></ul><p><br></p><p><strong>Quotes:</strong></p><p>“Piramidal is primarily focused at the moment in automating, or otherwise democratizing the interpretation of these tests, these brainwave recordings so that patients and people that have issues with their brain can get access to the diagnosis much, much, much faster.” — Dimitris Sakellariou</p><p>“It's very important to have discussions with neuroscientists and clinical experts in order to understand what is the end-to-end pipeline from receiving data all the way to inference.” — Dimitris Sakellariou</p><p><br></p><p>“Finding the right person. 
Someone that is very keen to build together with you and make important and difficult decisions can change massively a trajectory of your company.” — Dimitris Sakellariou</p><p><br></p><p><strong>Links:</strong></p><p><a href="https://www.linkedin.com/in/sacellarius/">Dimitris Sakellariou on LinkedIn</a></p><p><a href="https://x.com/dsacellarius">Dimitris Sakellariou on X</a></p><p><a href="https://piramidal.ai/">Piramidal</a></p><p><a href="https://www.linkedin.com/company/piramidalai/">Piramidal on LinkedIn</a></p><p><br><strong>Resources for Computer Vision Teams:</strong></p><p><a href="https://www.linkedin.com/in/hdcouture/">LinkedIn</a> – Connect with Heather.</p><p><a href="https://pixelscientia.com/newsletter/">Computer Vision Insights Newsletter</a> – A biweekly newsletter to help bring the latest machine learning and computer vision research to applications in people and planetary health.</p><p><a href="http://pixelscientia.com/services/strategy-session/">Computer Vision Strategy Session</a> – Not sure how to advance your computer vision project? Get unstuck with a clear set of next steps. Schedule a 1 hour strategy session now to advance your project.</p>]]>
      </content:encoded>
      <pubDate>Mon, 16 Sep 2024 06:00:00 -0400</pubDate>
      <author>Heather D. Couture</author>
      <enclosure url="https://2.gum.fm/op3.dev/e/pdcn.co/e/pdst.fm/e/dts.podtrac.com/redirect.mp3/media.transistor.fm/013c97c2/80b8bc2e.mp3" length="22806434" type="audio/mpeg"/>
      <itunes:author>Heather D. Couture</itunes:author>
      <itunes:image href="https://img.transistor.fm/Dpks7vJLKY7_5nfTSxHiiZ-nrtEnrVdWbSyBwaaf0qU/rs:fill:0:0:1/w:1400/h:1400/q:60/mb:500000/aHR0cHM6Ly9pbWct/dXBsb2FkLXByb2R1/Y3Rpb24udHJhbnNp/c3Rvci5mbS9lYjMx/OWY1ZmY5OGVjZTU1/OTE5YjEzNjdmOTU2/ZWFhYS5qcGVn.jpg"/>
      <itunes:duration>1421</itunes:duration>
      <itunes:summary>
        <![CDATA[<p>What if we could understand brain activity in real-time to better diagnose neurological conditions? In this episode, part of a special mini-series on domain-specific foundation models, I sit down with Dimitris Sakellariou, the founder and CEO of Piramidal, to talk about their groundbreaking work in automating EEG interpretation. Piramidal is focused on democratizing brain health insights, making interpreting brainwave data more accessible and accurate. With a strong foundation in neuroscience and AI, Dimitris and his team are developing models that could revolutionize how we understand brain activity and diagnose neurological conditions.</p><p>In our conversation, Dimitris explains the challenges of building a foundation model for brain activity, the role of data diversity, and the future potential for personalized brain health monitoring. Discover the implications of Piramidal’s technology beyond healthcare and its application in cognitive enhancement and stress management. 
Tune in as we explore how Piramidal is paving the way for personalized brain health monitoring and why this could be a game-changer for the future of medicine!</p><p><br></p><p><strong>Key Points:</strong></p><ul><li>Dimitris discusses his journey from physics to a career in neuroscience.</li><li>Explore Piramidal's mission to automate EEG interpretation.</li><li>Learn about the complexity and variability of brainwave patterns.</li><li>Hear how machine learning can better analyze brain activity.</li><li>Uncover the challenges of building a foundation model for EEG data.</li><li>Why diverse data sets are vital for training the foundational model.</li><li>Piramidal's plans for making EEG analysis more accessible.</li><li>Future use cases for Piramidal’s model in healthcare and beyond.</li><li>Discover why domain knowledge for model building is essential.</li><li>He shares advice for AI startup founders.</li></ul><p><br></p><p><strong>Quotes:</strong></p><p>“Piramidal is primarily focused at the moment in automating, or otherwise democratizing the interpretation of these tests, these brainwave recordings so that patients and people that have issues with their brain can get access to the diagnosis much, much, much faster.” — Dimitris Sakellariou</p><p>“It's very important to have discussions with neuroscientists and clinical experts in order to understand what is the end-to-end pipeline from receiving data all the way to inference.” — Dimitris Sakellariou</p><p><br></p><p>“Finding the right person. 
Someone that is very keen to build together with you and make important and difficult decisions can change massively a trajectory of your company.” — Dimitris Sakellariou</p><p><br></p><p><strong>Links:</strong></p><p><a href="https://www.linkedin.com/in/sacellarius/">Dimitris Sakellariou on LinkedIn</a></p><p><a href="https://x.com/dsacellarius">Dimitris Sakellariou on X</a></p><p><a href="https://piramidal.ai/">Piramidal</a></p><p><a href="https://www.linkedin.com/company/piramidalai/">Piramidal on LinkedIn</a></p><p><br><strong>Resources for Computer Vision Teams:</strong></p><p><a href="https://www.linkedin.com/in/hdcouture/">LinkedIn</a> – Connect with Heather.</p><p><a href="https://pixelscientia.com/newsletter/">Computer Vision Insights Newsletter</a> – A biweekly newsletter to help bring the latest machine learning and computer vision research to applications in people and planetary health.</p><p><a href="http://pixelscientia.com/services/strategy-session/">Computer Vision Strategy Session</a> – Not sure how to advance your computer vision project? Get unstuck with a clear set of next steps. Schedule a 1 hour strategy session now to advance your project.</p>]]>
      </itunes:summary>
      <itunes:keywords>machine learning, brain, eeg, foundation models</itunes:keywords>
      <itunes:explicit>No</itunes:explicit>
      <podcast:transcript url="https://share.transistor.fm/s/013c97c2/transcript.txt" type="text/plain"/>
    </item>
    <item>
      <title>Foundation Model Series: Better, Faster, Cheaper Earth Observation with Bruno Sánchez-Andrade Nuño from Clay</title>
      <itunes:episode>99</itunes:episode>
      <podcast:episode>99</podcast:episode>
      <itunes:title>Foundation Model Series: Better, Faster, Cheaper Earth Observation with Bruno Sánchez-Andrade Nuño from Clay</itunes:title>
      <itunes:episodeType>full</itunes:episodeType>
      <guid isPermaLink="false">d6d51e6d-67bc-4831-84b1-fe2956c52729</guid>
      <link>https://pixelscientia.com/podcast/better-faster-cheaper-earth-observation-with-bruno-sanchez-andrade-nuno-from-clay/</link>
      <description>
        <![CDATA[<p>Can AI be applied to enhance geospatial data for climate, nature and people? This episode kicks off a miniseries about domain-specific foundation models. Following the trends in language processing, domain-specific foundation models are enabling new possibilities for a variety of applications, including Earth observation. During this conversation, I am joined by Bruno Sánchez-Andrade Nuño, Executive Director of Clay, a nonprofit organization harnessing the power of AI for satellite images, spatial data, and more. Bruno shares the functionality and concept behind Clay, and his journey to building it. He goes on to unpack the tool’s foundation model in broad strokes, before explaining why it's important, and sharing the challenges he has faced along the way. We discuss the legal aspects of building Clay, and its primary goal to make it as easy as possible for any user to achieve their goals. We also touch on what the future might hold for Clay and the future of Earth observation. Thanks for listening!</p><p><br></p><p><strong>Key Points:</strong></p><ul><li>Introducing guest, Bruno Sánchez-Andrade Nuño, Executive Director at Clay.</li><li>His journey from NASA astrophysicist to climate change, social development, and AI researcher.</li><li>What Clay focuses on: using remote sensing maps to interpret the Earth’s data.</li><li>The mechanics of how Clay is used and how different feature sets compare to one another.</li><li>A broad explanation of the tool’s foundation model and why it is quicker, cheaper, and more environmentally friendly.</li><li>Two main benefits of the tool that Bruno finds most exciting. </li><li>Data and infrastructure required to build Clay including 70 million satellite and aerial images.</li><li>Measuring what the model understands and the process of compressing an image into 700 numbers.</li><li>Privacy and intellectual property in the realm of satellite imaging and mapping. 
</li><li>What commercial imagery could add to the model and how it might be integrated in the future. </li><li>Clay’s partnerships with university and company groups</li><li>Why the focus of Clay is to make it as easy as possible for anyone to use the tool for anything they want to do. </li><li>Challenges encountered on the road to building Clay: explaining what it is.</li><li>The complexity of benchmarking foundation models and how this relates to Clay. </li><li>Working with partners to build Clay and the rest of the ecosystem. </li><li>Lessons from building Clay that may apply to other foundation models.</li><li>Bruno’s predictions for the future of foundation models and Clay. </li><li>What is certain about the future of Clay and our understanding of Earth. </li></ul><p><br></p><p><strong>Quotes:</strong></p><p>“Clay is trying to figure out how to finally increase the adoption of remote sensing by leveraging a tool that itself is very complex, but the result of that tool is very easy to use.” — Bruno Sánchez-Andrade Nuño</p><p><br></p><p>“If you start with a foundational model that gets you most of the way there, [then] you can create those trials much quicker, much cheaper, and much more environmentally friendly.” — Bruno Sánchez-Andrade Nuño</p><p><br></p><p>“This is so new, we get the chance, those of us working on it, that we can save the whole industry, if you will, the whole space of AI for it.” — Bruno Sánchez-Andrade Nuño</p><p><br></p><p>“Clay, I believe, is not only the largest and most efficient model AI for Earth, for any kind of like foundational model. 
It is also completely open source.” — Bruno Sánchez-Andrade Nuño</p><p><br></p><p>“What we try to focus on is how can we make it as simple as possible for anyone anywhere to use this model for anything they want to do.” — Bruno Sánchez-Andrade Nuño</p><p><br></p><p><strong>Links:</strong></p><p><a href="https://brunosan.eu/">Bruno Sánchez-Andrade</a></p><p><a href="https://x.com/brunosan/">Bruno Sánchez-Andrade Nuño on X</a></p><p><a href="https://www.linkedin.com/in/nasonurb/">Bruno Sánchez-Andrade Nuño on LinkedIn</a></p><p><a href="https://madewithclay.org/">Clay</a></p><p><a href="https://www.linkedin.com/company/made-with-clay/">Clay on LinkedIn</a></p><p><br><strong>Resources for Computer Vision Teams:</strong></p><p><a href="https://www.linkedin.com/in/hdcouture/">LinkedIn</a> – Connect with Heather.</p><p><a href="https://pixelscientia.com/newsletter/">Computer Vision Insights Newsletter</a> – A biweekly newsletter to help bring the latest machine learning and computer vision research to applications in people and planetary health.</p><p><a href="http://pixelscientia.com/services/strategy-session/">Computer Vision Strategy Session</a> – Not sure how to advance your computer vision project? Get unstuck with a clear set of next steps. Schedule a 1 hour strategy session now to advance your project.</p>]]>
      </description>
      <content:encoded>
        <![CDATA[<p>Can AI be applied to enhance geospatial data for climate, nature and people? This episode kicks off a miniseries about domain-specific foundation models. Following the trends in language processing, domain-specific foundation models are enabling new possibilities for a variety of applications, including Earth observation. During this conversation, I am joined by Bruno Sánchez-Andrade Nuño, Executive Director of Clay, a nonprofit organization harnessing the power of AI for satellite images, spatial data, and more. Bruno shares the functionality and concept behind Clay, and his journey to building it. He goes on to unpack the tool’s foundation model in broad strokes, before explaining why it's important, and sharing the challenges he has faced along the way. We discuss the legal aspects of building Clay, and its primary goal to make it as easy as possible for any user to achieve their goals. We also touch on what the future might hold for Clay and the future of Earth observation. Thanks for listening!</p><p><br></p><p><strong>Key Points:</strong></p><ul><li>Introducing guest, Bruno Sánchez-Andrade Nuño, Executive Director at Clay.</li><li>His journey from NASA astrophysicist to climate change, social development, and AI researcher.</li><li>What Clay focuses on: using remote sensing maps to interpret the Earth’s data.</li><li>The mechanics of how Clay is used and how different feature sets compare to one another.</li><li>A broad explanation of the tool’s foundation model and why it is quicker, cheaper, and more environmentally friendly.</li><li>Two main benefits of the tool that Bruno finds most exciting. </li><li>Data and infrastructure required to build Clay including 70 million satellite and aerial images.</li><li>Measuring what the model understands and the process of compressing an image into 700 numbers.</li><li>Privacy and intellectual property in the realm of satellite imaging and mapping. 
</li><li>What commercial imagery could add to the model and how it might be integrated in the future. </li><li>Clay’s partnerships with university and company groups</li><li>Why the focus of Clay is to make it as easy as possible for anyone to use the tool for anything they want to do. </li><li>Challenges encountered on the road to building Clay: explaining what it is.</li><li>The complexity of benchmarking foundation models and how this relates to Clay. </li><li>Working with partners to build Clay and the rest of the ecosystem. </li><li>Lessons from building Clay that may apply to other foundation models.</li><li>Bruno’s predictions for the future of foundation models and Clay. </li><li>What is certain about the future of Clay and our understanding of Earth. </li></ul><p><br></p><p><strong>Quotes:</strong></p><p>“Clay is trying to figure out how to finally increase the adoption of remote sensing by leveraging a tool that itself is very complex, but the result of that tool is very easy to use.” — Bruno Sánchez-Andrade Nuño</p><p><br></p><p>“If you start with a foundational model that gets you most of the way there, [then] you can create those trials much quicker, much cheaper, and much more environmentally friendly.” — Bruno Sánchez-Andrade Nuño</p><p><br></p><p>“This is so new, we get the chance, those of us working on it, that we can save the whole industry, if you will, the whole space of AI for it.” — Bruno Sánchez-Andrade Nuño</p><p><br></p><p>“Clay, I believe, is not only the largest and most efficient model AI for Earth, for any kind of like foundational model. 
It is also completely open source.” — Bruno Sánchez-Andrade Nuño</p><p><br></p><p>“What we try to focus on is how can we make it as simple as possible for anyone anywhere to use this model for anything they want to do.” — Bruno Sánchez-Andrade Nuño</p><p><br></p><p><strong>Links:</strong></p><p><a href="https://brunosan.eu/">Bruno Sánchez-Andrade</a></p><p><a href="https://x.com/brunosan/">Bruno Sánchez-Andrade Nuño on X</a></p><p><a href="https://www.linkedin.com/in/nasonurb/">Bruno Sánchez-Andrade Nuño on LinkedIn</a></p><p><a href="https://madewithclay.org/">Clay</a></p><p><a href="https://www.linkedin.com/company/made-with-clay/">Clay on LinkedIn</a></p><p><br><strong>Resources for Computer Vision Teams:</strong></p><p><a href="https://www.linkedin.com/in/hdcouture/">LinkedIn</a> – Connect with Heather.</p><p><a href="https://pixelscientia.com/newsletter/">Computer Vision Insights Newsletter</a> – A biweekly newsletter to help bring the latest machine learning and computer vision research to applications in people and planetary health.</p><p><a href="http://pixelscientia.com/services/strategy-session/">Computer Vision Strategy Session</a> – Not sure how to advance your computer vision project? Get unstuck with a clear set of next steps. Schedule a 1 hour strategy session now to advance your project.</p>]]>
      </content:encoded>
      <pubDate>Mon, 09 Sep 2024 06:00:00 -0400</pubDate>
      <author>Heather D. Couture</author>
      <enclosure url="https://2.gum.fm/op3.dev/e/pdcn.co/e/pdst.fm/e/dts.podtrac.com/redirect.mp3/media.transistor.fm/4bb30972/883cdc45.mp3" length="34359955" type="audio/mpeg"/>
      <itunes:author>Heather D. Couture</itunes:author>
      <itunes:image href="https://img.transistor.fm/TMHTVZ_S9DOAjvD4CiJbtwS9eomwukQwrB931zEOXjw/rs:fill:0:0:1/w:1400/h:1400/q:60/mb:500000/aHR0cHM6Ly9pbWct/dXBsb2FkLXByb2R1/Y3Rpb24udHJhbnNp/c3Rvci5mbS82MTZl/MmU1NDM4NWY0MTc1/OTI3MGY1YWU1MDky/MWMyNy5qcGVn.jpg"/>
      <itunes:duration>2135</itunes:duration>
      <itunes:summary>
        <![CDATA[<p>Can AI be applied to enhance geospatial data for climate, nature and people? This episode kicks off a miniseries about domain-specific foundation models. Following the trends in language processing, domain-specific foundation models are enabling new possibilities for a variety of applications, including Earth observation. During this conversation, I am joined by Bruno Sánchez-Andrade Nuño, Executive Director of Clay, a nonprofit organization harnessing the power of AI for satellite images, spatial data, and more. Bruno shares the functionality and concept behind Clay, and his journey to building it. He goes on to unpack the tool’s foundation model in broad strokes, before explaining why it's important, and sharing the challenges he has faced along the way. We discuss the legal aspects of building Clay, and its primary goal to make it as easy as possible for any user to achieve their goals. We also touch on what the future might hold for Clay and the future of Earth observation. Thanks for listening!</p><p><br></p><p><strong>Key Points:</strong></p><ul><li>Introducing guest, Bruno Sánchez-Andrade Nuño, Executive Director at Clay.</li><li>His journey from NASA astrophysicist to climate change, social development, and AI researcher.</li><li>What Clay focuses on: using remote sensing maps to interpret the Earth’s data.</li><li>The mechanics of how Clay is used and how different feature sets compare to one another.</li><li>A broad explanation of the tool’s foundation model and why it is quicker, cheaper, and more environmentally friendly.</li><li>Two main benefits of the tool that Bruno finds most exciting. </li><li>Data and infrastructure required to build Clay including 70 million satellite and aerial images.</li><li>Measuring what the model understands and the process of compressing an image into 700 numbers.</li><li>Privacy and intellectual property in the realm of satellite imaging and mapping. 
</li><li>What commercial imagery could add to the model and how it might be integrated in the future. </li><li>Clay’s partnerships with university and company groups</li><li>Why the focus of Clay is to make it as easy as possible for anyone to use the tool for anything they want to do. </li><li>Challenges encountered on the road to building Clay: explaining what it is.</li><li>The complexity of benchmarking foundation models and how this relates to Clay. </li><li>Working with partners to build Clay and the rest of the ecosystem. </li><li>Lessons from building Clay that may apply to other foundation models.</li><li>Bruno’s predictions for the future of foundation models and Clay. </li><li>What is certain about the future of Clay and our understanding of Earth. </li></ul><p><br></p><p><strong>Quotes:</strong></p><p>“Clay is trying to figure out how to finally increase the adoption of remote sensing by leveraging a tool that itself is very complex, but the result of that tool is very easy to use.” — Bruno Sánchez-Andrade Nuño</p><p><br></p><p>“If you start with a foundational model that gets you most of the way there, [then] you can create those trials much quicker, much cheaper, and much more environmentally friendly.” — Bruno Sánchez-Andrade Nuño</p><p><br></p><p>“This is so new, we get the chance, those of us working on it, that we can save the whole industry, if you will, the whole space of AI for it.” — Bruno Sánchez-Andrade Nuño</p><p><br></p><p>“Clay, I believe, is not only the largest and most efficient model AI for Earth, for any kind of like foundational model. 
It is also completely open source.” — Bruno Sánchez-Andrade Nuño</p><p><br></p><p>“What we try to focus on is how can we make it as simple as possible for anyone anywhere to use this model for anything they want to do.” — Bruno Sánchez-Andrade Nuño</p><p><br></p><p><strong>Links:</strong></p><p><a href="https://brunosan.eu/">Bruno Sánchez-Andrade</a></p><p><a href="https://x.com/brunosan/">Bruno Sánchez-Andrade Nuño on X</a></p><p><a href="https://www.linkedin.com/in/nasonurb/">Bruno Sánchez-Andrade Nuño on LinkedIn</a></p><p><a href="https://madewithclay.org/">Clay</a></p><p><a href="https://www.linkedin.com/company/made-with-clay/">Clay on LinkedIn</a></p><p><br><strong>Resources for Computer Vision Teams:</strong></p><p><a href="https://www.linkedin.com/in/hdcouture/">LinkedIn</a> – Connect with Heather.</p><p><a href="https://pixelscientia.com/newsletter/">Computer Vision Insights Newsletter</a> – A biweekly newsletter to help bring the latest machine learning and computer vision research to applications in people and planetary health.</p><p><a href="http://pixelscientia.com/services/strategy-session/">Computer Vision Strategy Session</a> – Not sure how to advance your computer vision project? Get unstuck with a clear set of next steps. Schedule a 1 hour strategy session now to advance your project.</p>]]>
      </itunes:summary>
      <itunes:keywords>machine learning, earth observation, remote sensing, foundation models</itunes:keywords>
      <itunes:explicit>No</itunes:explicit>
      <podcast:transcript url="https://share.transistor.fm/s/4bb30972/transcript.txt" type="text/plain"/>
    </item>
    <item>
      <title>Evolutionary Insights for Drug Discovery with Ashley Zehnder from Fauna Bio</title>
      <itunes:episode>98</itunes:episode>
      <podcast:episode>98</podcast:episode>
      <itunes:title>Evolutionary Insights for Drug Discovery with Ashley Zehnder from Fauna Bio</itunes:title>
      <itunes:episodeType>full</itunes:episodeType>
      <guid isPermaLink="false">bcfb4a73-905b-4ed1-9b7c-e4c5720bbdd8</guid>
      <link>https://pixelscientia.com/podcast/evolutionary-insights-for-drug-discovery-with-ashley-zehnder-from-fauna-bio/</link>
      <description>
        <![CDATA[<p>In a world where conventional drug discovery methods frequently fall short, today's guest addresses the critical challenge of fighting human diseases by drawing inspiration from nature’s most resilient creatures. Could the secret to overcoming our most stubborn illnesses lie in the extraordinary adaptability of extreme mammals? Veterinarian-scientist Ashley Zehnder, the Co-founder and CEO of AI-driven drug discovery company Fauna Bio, believes so.</p><p>By leveraging data from 100 million years of evolved disease resistance in mammals, Ashley sees a unique opportunity at the crossroads of genomics and emerging model species to improve health for all species, including humans. In this episode, she explores how harnessing the biological secrets of these animals using AI and machine learning could revolutionize medicine, leading to breakthroughs that benefit us all. Tune in to discover how Fauna Bio is pioneering a new frontier in drug discovery and how understanding the resilience of these creatures could reshape the future of healthcare!</p><p><br></p><p><strong>Key Points:</strong></p><ul><li>Insight into the diverse backgrounds of Fauna Bio’s founding members.</li><li>Ways that Fauna Bio uses AI and genomics to identify key targets for new therapeutics.</li><li>The role machine learning plays in analyzing and annotating large volumes of data.</li><li>Gene expression and other data inputs that drive Fauna Bio’s discoveries.</li><li>The collaborative effort required to collate datasets from 400+ mammals.</li><li>Challenges of working with genomic data and training ML models on it.</li><li>How Fauna Bio rigorously validates their AI-driven discoveries.</li><li>Cooperation between ML developers and domain experts to advance this technology.</li><li>Technological advancements that enable Fauna Bio’s innovations.</li><li>Ashley’s advice on differentiation for leaders of AI-powered startups.</li><li>Where she sees Fauna Bio making the biggest impact 
in the future.</li></ul><p><br></p><p><strong>Quotes:</strong></p><p>“[Fauna Bio uses] AI and genomics as a way to identify the most impactful targets for new therapeutic programs across a broad number of diseases.” — Ashley Zehnder</p><p><br></p><p>“It’s certainly easier than it has been in the past to generate very high-quality single-cell RNA sequencing. We’re doing a lot of that. The challenges on the technical side are getting much easier. The challenges on the interpretation side are still there.” — Ashley Zehnder</p><p><br></p><p>“There are many points along the drug discovery path where AI companies can differentiate. But that story has to be clear because, otherwise, it's very hard to get out of the signal-to-noise that is the AI discovery landscape in biopharma” — Ashley Zehnder</p><p><br></p><p><strong>Links:</strong></p><p><a href="https://www.faunabio.com/">Fauna Bio</a></p><p><a href="https://www.linkedin.com/in/ashley-zehnder-31950827/">Ashley Zehnder on LinkedIn</a></p><p><a href="https://x.com/ashleyz413">Ashley Zehnder on X</a></p><p><a href="mailto:ashley@faunabio.com">Ashley Zehnder Email</a></p><p><a href="https://zoonomiaproject.org/">Zoonomia Project</a></p><p><a href="https://www.science.org/toc/science/380/6643">Science Issue dedicated to the Zoonomia Project</a></p><p><br><strong>Resources for Computer Vision Teams:</strong></p><p><a href="https://www.linkedin.com/in/hdcouture/">LinkedIn</a> – Connect with Heather.</p><p><a href="https://pixelscientia.com/newsletter/">Computer Vision Insights Newsletter</a> – A biweekly newsletter to help bring the latest machine learning and computer vision research to applications in people and planetary health.</p><p><a href="http://pixelscientia.com/services/strategy-session/">Computer Vision Strategy Session</a> – Not sure how to advance your computer vision project? Get unstuck with a clear set of next steps. Schedule a 1 hour strategy session now to advance your project.</p>]]>
      </description>
      <content:encoded>
        <![CDATA[<p>In a world where conventional drug discovery methods frequently fall short, today's guest addresses the critical challenge of fighting human diseases by drawing inspiration from nature’s most resilient creatures. Could the secret to overcoming our most stubborn illnesses lie in the extraordinary adaptability of extreme mammals? Veterinarian-scientist Ashley Zehnder, the Co-founder and CEO of AI-driven drug discovery company Fauna Bio, believes so.</p><p>By leveraging data from 100 million years of evolved disease resistance in mammals, Ashley sees a unique opportunity at the crossroads of genomics and emerging model species to improve health for all species, including humans. In this episode, she explores how harnessing the biological secrets of these animals using AI and machine learning could revolutionize medicine, leading to breakthroughs that benefit us all. Tune in to discover how Fauna Bio is pioneering a new frontier in drug discovery and how understanding the resilience of these creatures could reshape the future of healthcare!</p><p><br></p><p><strong>Key Points:</strong></p><ul><li>Insight into the diverse backgrounds of Fauna Bio’s founding members.</li><li>Ways that Fauna Bio uses AI and genomics to identify key targets for new therapeutics.</li><li>The role machine learning plays in analyzing and annotating large volumes of data.</li><li>Gene expression and other data inputs that drive Fauna Bio’s discoveries.</li><li>The collaborative effort required to collate datasets from 400+ mammals.</li><li>Challenges of working with genomic data and training ML models on it.</li><li>How Fauna Bio rigorously validates their AI-driven discoveries.</li><li>Cooperation between ML developers and domain experts to advance this technology.</li><li>Technological advancements that enable Fauna Bio’s innovations.</li><li>Ashley’s advice on differentiation for leaders of AI-powered startups.</li><li>Where she sees Fauna Bio making the biggest impact 
in the future.</li></ul><p><br></p><p><strong>Quotes:</strong></p><p>“[Fauna Bio uses] AI and genomics as a way to identify the most impactful targets for new therapeutic programs across a broad number of diseases.” — Ashley Zehnder</p><p><br></p><p>“It’s certainly easier than it has been in the past to generate very high-quality single-cell RNA sequencing. We’re doing a lot of that. The challenges on the technical side are getting much easier. The challenges on the interpretation side are still there.” — Ashley Zehnder</p><p><br></p><p>“There are many points along the drug discovery path where AI companies can differentiate. But that story has to be clear because, otherwise, it's very hard to get out of the signal-to-noise that is the AI discovery landscape in biopharma” — Ashley Zehnder</p><p><br></p><p><strong>Links:</strong></p><p><a href="https://www.faunabio.com/">Fauna Bio</a></p><p><a href="https://www.linkedin.com/in/ashley-zehnder-31950827/">Ashley Zehnder on LinkedIn</a></p><p><a href="https://x.com/ashleyz413">Ashley Zehnder on X</a></p><p><a href="mailto:ashley@faunabio.com">Ashley Zehnder Email</a></p><p><a href="https://zoonomiaproject.org/">Zoonomia Project</a></p><p><a href="https://www.science.org/toc/science/380/6643">Science Issue dedicated to the Zoonomia Project</a></p><p><br><strong>Resources for Computer Vision Teams:</strong></p><p><a href="https://www.linkedin.com/in/hdcouture/">LinkedIn</a> – Connect with Heather.</p><p><a href="https://pixelscientia.com/newsletter/">Computer Vision Insights Newsletter</a> – A biweekly newsletter to help bring the latest machine learning and computer vision research to applications in people and planetary health.</p><p><a href="http://pixelscientia.com/services/strategy-session/">Computer Vision Strategy Session</a> – Not sure how to advance your computer vision project? Get unstuck with a clear set of next steps. Schedule a 1 hour strategy session now to advance your project.</p>]]>
      </content:encoded>
      <pubDate>Mon, 02 Sep 2024 06:00:00 -0400</pubDate>
      <author>Heather D. Couture</author>
      <enclosure url="https://2.gum.fm/op3.dev/e/pdcn.co/e/pdst.fm/e/dts.podtrac.com/redirect.mp3/media.transistor.fm/746a5f7d/48a65eca.mp3" length="26294284" type="audio/mpeg"/>
      <itunes:author>Heather D. Couture</itunes:author>
      <itunes:image href="https://img.transistor.fm/lAnWk-eyJTwY7uQaRt-vlqD0pZG3Xa8l-ttUKiRpFc0/rs:fill:0:0:1/w:1400/h:1400/q:60/mb:500000/aHR0cHM6Ly9pbWct/dXBsb2FkLXByb2R1/Y3Rpb24udHJhbnNp/c3Rvci5mbS84NWQy/OGRhMDU1MDFiZDVl/ZmYzNjM5OTYzY2Rm/Y2QxZS5qcGVn.jpg"/>
      <itunes:duration>1637</itunes:duration>
      <itunes:summary>
        <![CDATA[<p>In a world where conventional drug discovery methods frequently fall short, today's guest addresses the critical challenge of fighting human diseases by drawing inspiration from nature’s most resilient creatures. Could the secret to overcoming our most stubborn illnesses lie in the extraordinary adaptability of extreme mammals? Veterinarian-scientist Ashley Zehnder, the Co-founder and CEO of AI-driven drug discovery company Fauna Bio, believes so.</p><p>By leveraging data from 100 million years of evolved disease resistance in mammals, Ashley sees a unique opportunity at the crossroads of genomics and emerging model species to improve health for all species, including humans. In this episode, she explores how harnessing the biological secrets of these animals using AI and machine learning could revolutionize medicine, leading to breakthroughs that benefit us all. Tune in to discover how Fauna Bio is pioneering a new frontier in drug discovery and how understanding the resilience of these creatures could reshape the future of healthcare!</p><p><br></p><p><strong>Key Points:</strong></p><ul><li>Insight into the diverse backgrounds of Fauna Bio’s founding members.</li><li>Ways that Fauna Bio uses AI and genomics to identify key targets for new therapeutics.</li><li>The role machine learning plays in analyzing and annotating large volumes of data.</li><li>Gene expression and other data inputs that drive Fauna Bio’s discoveries.</li><li>The collaborative effort required to collate datasets from 400+ mammals.</li><li>Challenges of working with genomic data and training ML models on it.</li><li>How Fauna Bio rigorously validates their AI-driven discoveries.</li><li>Cooperation between ML developers and domain experts to advance this technology.</li><li>Technological advancements that enable Fauna Bio’s innovations.</li><li>Ashley’s advice on differentiation for leaders of AI-powered startups.</li><li>Where she sees Fauna Bio making the biggest impact 
in the future.</li></ul><p><br></p><p><strong>Quotes:</strong></p><p>“[Fauna Bio uses] AI and genomics as a way to identify the most impactful targets for new therapeutic programs across a broad number of diseases.” — Ashley Zehnder</p><p><br></p><p>“It’s certainly easier than it has been in the past to generate very high-quality single-cell RNA sequencing. We’re doing a lot of that. The challenges on the technical side are getting much easier. The challenges on the interpretation side are still there.” — Ashley Zehnder</p><p><br></p><p>“There are many points along the drug discovery path where AI companies can differentiate. But that story has to be clear because, otherwise, it's very hard to get out of the signal-to-noise that is the AI discovery landscape in biopharma” — Ashley Zehnder</p><p><br></p><p><strong>Links:</strong></p><p><a href="https://www.faunabio.com/">Fauna Bio</a></p><p><a href="https://www.linkedin.com/in/ashley-zehnder-31950827/">Ashley Zehnder on LinkedIn</a></p><p><a href="https://x.com/ashleyz413">Ashley Zehnder on X</a></p><p><a href="mailto:ashley@faunabio.com">Ashley Zehnder Email</a></p><p><a href="https://zoonomiaproject.org/">Zoonomia Project</a></p><p><a href="https://www.science.org/toc/science/380/6643">Science Issue dedicated to the Zoonomia Project</a></p><p><br><strong>Resources for Computer Vision Teams:</strong></p><p><a href="https://www.linkedin.com/in/hdcouture/">LinkedIn</a> – Connect with Heather.</p><p><a href="https://pixelscientia.com/newsletter/">Computer Vision Insights Newsletter</a> – A biweekly newsletter to help bring the latest machine learning and computer vision research to applications in people and planetary health.</p><p><a href="http://pixelscientia.com/services/strategy-session/">Computer Vision Strategy Session</a> – Not sure how to advance your computer vision project? Get unstuck with a clear set of next steps. Schedule a 1 hour strategy session now to advance your project.</p>]]>
      </itunes:summary>
      <itunes:keywords>machine learning, drug discovery, life science</itunes:keywords>
      <itunes:explicit>No</itunes:explicit>
      <podcast:transcript url="https://share.transistor.fm/s/746a5f7d/transcript.txt" type="text/plain"/>
    </item>
    <item>
      <title>Better Therapeutics Using Lab-Grown Tissue with Andrei Georgescu from Vivodyne</title>
      <itunes:episode>97</itunes:episode>
      <podcast:episode>97</podcast:episode>
      <itunes:title>Better Therapeutics Using Lab-Grown Tissue with Andrei Georgescu from Vivodyne</itunes:title>
      <itunes:episodeType>full</itunes:episodeType>
      <guid isPermaLink="false">807f5686-438a-4f7d-881b-030cce76a84d</guid>
      <link>https://pixelscientia.com/podcast/better-therapeutics-using-lab-grown-tissue-with-andrei-georgescu-from-vivodyne/</link>
      <description>
        <![CDATA[<p>One of the biggest hurdles in medical research is the gap between animal studies and human trials, a disconnect that often leads to failed drug tests and wasted resources. But what if there was a way to bridge that gap and create treatments that are more effective for humans from the start?</p><p>Today, I am joined by Dr. Andrei Georgescu, Founder and CEO of Vivodyne, a groundbreaking biotechnology company that is transforming how scientists study human biology and develop new therapeutics. In this episode, he reveals how Vivodyne harnesses lab-grown tissue and advanced multimodal AI to create more effective therapeutics. We explore the challenges of gathering human tissue data, the collaboration between biologists, robotics engineers, and machine learning developers to build powerful machine learning models, and the profound impact that Vivodyne is poised to make in the fight against diseases. To discover how Vivodyne’s innovations can lead to more successful treatments and faster drug development, tune in today!</p><p><br></p><p><strong>Key Points:</strong></p><ul><li>Insight into Andrei’s background and how it led him to create Vivodyne.</li><li>What Vivodyne does and why it’s so important for drug discovery.</li><li>The role that AI and machine learning play in analyzing vast amounts of data.</li><li>Different data inputs and outputs for Vivodyne’s advanced multimodal AI.</li><li>The value of biased and unbiased AI outputs depending on the context.</li><li>Why interpretability and explainability are crucial in fields like biotechnology.</li><li>Challenges associated with collecting human tissue data to train Vivodyne’s models.</li><li>What goes into validating Vivodyne’s machine learning models.</li><li>Difficulties in integrating biology knowledge with robotics and machine learning.</li><li>Andrei’s business-focused advice for technical founders.</li><li>The profound impact that Vivodyne will have on drug discovery in the 
future.</li></ul><p><br></p><p><strong>Quotes:</strong></p><p>“Vivodyne grows human tissues at a very large scale so that we can understand human physiology and we can test directly on it in order to discover and develop better drugs that are both safer and more efficacious.” — Andrei Georgescu</p><p><br></p><p>“We use machine learning and AI as a mechanism to understand the complexity of very deep data and to very efficiently apply that complexity and infer from what we've learned across the very large breadth of data that we collect.” — Andrei Georgescu</p><p><br></p><p>“To address [the problem of a] glaring lack of trainable data, we create that data by growing it at scale.” — Andrei Georgescu</p><p><br></p><p>“If you're a technical founder, do something that is incredibly hard because the ability for you to do that thing will grant you much more leverage than creating what is otherwise a much more simple and generic business.” — Andrei Georgescu</p><p><br></p><p>“[With Vivodyne], we will enter a world of plenty where the development of new drugs against diseases becomes a far more successful, reliable, and predictive process, and we're able to make much safer and much more effective drugs just by virtue of being able to optimize that therapeutic on human tissues before giving it to people for the first time in-clinic.” — Andrei Georgescu</p><p><br></p><p><strong>Links:</strong></p><p><a href="https://ageorgescu.com/">Andrei Georgescu</a></p><p><a href="https://www.vivodyne.com/">Vivodyne</a></p><p><a href="https://www.linkedin.com/in/andrei-georgescu-phd/">Andrei Georgescu on LinkedIn</a></p><p><br><strong>Resources for Computer Vision Teams:</strong></p><p><a href="https://www.linkedin.com/in/hdcouture/">LinkedIn</a> – Connect with Heather.</p><p><a href="https://pixelscientia.com/newsletter/">Computer Vision Insights Newsletter</a> – A biweekly newsletter to help bring the latest machine learning and computer vision research to applications in people and 
planetary health.</p><p><a href="http://pixelscientia.com/services/strategy-session/">Computer Vision Strategy Session</a> – Not sure how to advance your computer vision project? Get unstuck with a clear set of next steps. Schedule a 1 hour strategy session now to advance your project.</p>]]>
      </description>
      <content:encoded>
        <![CDATA[<p>One of the biggest hurdles in medical research is the gap between animal studies and human trials, a disconnect that often leads to failed drug tests and wasted resources. But what if there was a way to bridge that gap and create treatments that are more effective for humans from the start?</p><p>Today, I am joined by Dr. Andrei Georgescu, Founder and CEO of Vivodyne, a groundbreaking biotechnology company that is transforming how scientists study human biology and develop new therapeutics. In this episode, he reveals how Vivodyne harnesses lab-grown tissue and advanced multimodal AI to create more effective therapeutics. We explore the challenges of gathering human tissue data, the collaboration between biologists, robotics engineers, and machine learning developers to build powerful machine learning models, and the profound impact that Vivodyne is poised to make in the fight against diseases. To discover how Vivodyne’s innovations can lead to more successful treatments and faster drug development, tune in today!</p><p><br></p><p><strong>Key Points:</strong></p><ul><li>Insight into Andrei’s background and how it led him to create Vivodyne.</li><li>What Vivodyne does and why it’s so important for drug discovery.</li><li>The role that AI and machine learning play in analyzing vast amounts of data.</li><li>Different data inputs and outputs for Vivodyne’s advanced multimodal AI.</li><li>The value of biased and unbiased AI outputs depending on the context.</li><li>Why interpretability and explainability are crucial in fields like biotechnology.</li><li>Challenges associated with collecting human tissue data to train Vivodyne’s models.</li><li>What goes into validating Vivodyne’s machine learning models.</li><li>Difficulties in integrating biology knowledge with robotics and machine learning.</li><li>Andrei’s business-focused advice for technical founders.</li><li>The profound impact that Vivodyne will have on drug discovery in the 
future.</li></ul><p><br></p><p><strong>Quotes:</strong></p><p>“Vivodyne grows human tissues at a very large scale so that we can understand human physiology and we can test directly on it in order to discover and develop better drugs that are both safer and more efficacious.” — Andrei Georgescu</p><p><br></p><p>“We use machine learning and AI as a mechanism to understand the complexity of very deep data and to very efficiently apply that complexity and infer from what we've learned across the very large breadth of data that we collect.” — Andrei Georgescu</p><p><br></p><p>“To address [the problem of a] glaring lack of trainable data, we create that data by growing it at scale.” — Andrei Georgescu</p><p><br></p><p>“If you're a technical founder, do something that is incredibly hard because the ability for you to do that thing will grant you much more leverage than creating what is otherwise a much more simple and generic business.” — Andrei Georgescu</p><p><br></p><p>“[With Vivodyne], we will enter a world of plenty where the development of new drugs against diseases becomes a far more successful, reliable, and predictive process, and we're able to make much safer and much more effective drugs just by virtue of being able to optimize that therapeutic on human tissues before giving it to people for the first time in-clinic.” — Andrei Georgescu</p><p><br></p><p><strong>Links:</strong></p><p><a href="https://ageorgescu.com/">Andrei Georgescu</a></p><p><a href="https://www.vivodyne.com/">Vivodyne</a></p><p><a href="https://www.linkedin.com/in/andrei-georgescu-phd/">Andrei Georgescu on LinkedIn</a></p><p><br><strong>Resources for Computer Vision Teams:</strong></p><p><a href="https://www.linkedin.com/in/hdcouture/">LinkedIn</a> – Connect with Heather.</p><p><a href="https://pixelscientia.com/newsletter/">Computer Vision Insights Newsletter</a> – A biweekly newsletter to help bring the latest machine learning and computer vision research to applications in people and 
planetary health.</p><p><a href="http://pixelscientia.com/services/strategy-session/">Computer Vision Strategy Session</a> – Not sure how to advance your computer vision project? Get unstuck with a clear set of next steps. Schedule a 1 hour strategy session now to advance your project.</p>]]>
      </content:encoded>
      <pubDate>Mon, 26 Aug 2024 06:00:00 -0400</pubDate>
      <author>Heather D. Couture</author>
      <enclosure url="https://2.gum.fm/op3.dev/e/pdcn.co/e/pdst.fm/e/dts.podtrac.com/redirect.mp3/media.transistor.fm/71001778/4b23c894.mp3" length="48934570" type="audio/mpeg"/>
      <itunes:author>Heather D. Couture</itunes:author>
      <itunes:image href="https://img.transistor.fm/Pzthc_nezATd_LScRBUbjyQzroxpsmURZ8c3g5ZlZsE/rs:fill:0:0:1/w:1400/h:1400/q:60/mb:500000/aHR0cHM6Ly9pbWct/dXBsb2FkLXByb2R1/Y3Rpb24udHJhbnNp/c3Rvci5mbS8zNDE1/YmVkZjNkMjE5MzJj/ODM0ZmM3NDBkYzMy/NGFmMy5wbmc.jpg"/>
      <itunes:duration>2037</itunes:duration>
      <itunes:summary>
        <![CDATA[<p>One of the biggest hurdles in medical research is the gap between animal studies and human trials, a disconnect that often leads to failed drug tests and wasted resources. But what if there was a way to bridge that gap and create treatments that are more effective for humans from the start?</p><p>Today, I am joined by Dr. Andrei Georgescu, Founder and CEO of Vivodyne, a groundbreaking biotechnology company that is transforming how scientists study human biology and develop new therapeutics. In this episode, he reveals how Vivodyne harnesses lab-grown tissue and advanced multimodal AI to create more effective therapeutics. We explore the challenges of gathering human tissue data, the collaboration between biologists, robotics engineers, and machine learning developers to build powerful machine learning models, and the profound impact that Vivodyne is poised to make in the fight against diseases. To discover how Vivodyne’s innovations can lead to more successful treatments and faster drug development, tune in today!</p><p><br></p><p><strong>Key Points:</strong></p><ul><li>Insight into Andrei’s background and how it led him to create Vivodyne.</li><li>What Vivodyne does and why it’s so important for drug discovery.</li><li>The role that AI and machine learning play in analyzing vast amounts of data.</li><li>Different data inputs and outputs for Vivodyne’s advanced multimodal AI.</li><li>The value of biased and unbiased AI outputs depending on the context.</li><li>Why interpretability and explainability are crucial in fields like biotechnology.</li><li>Challenges associated with collecting human tissue data to train Vivodyne’s models.</li><li>What goes into validating Vivodyne’s machine learning models.</li><li>Difficulties in integrating biology knowledge with robotics and machine learning.</li><li>Andrei’s business-focused advice for technical founders.</li><li>The profound impact that Vivodyne will have on drug discovery in the 
future.</li></ul><p><br></p><p><strong>Quotes:</strong></p><p>“Vivodyne grows human tissues at a very large scale so that we can understand human physiology and we can test directly on it in order to discover and develop better drugs that are both safer and more efficacious.” — Andrei Georgescu</p><p><br></p><p>“We use machine learning and AI as a mechanism to understand the complexity of very deep data and to very efficiently apply that complexity and infer from what we've learned across the very large breadth of data that we collect.” — Andrei Georgescu</p><p><br></p><p>“To address [the problem of a] glaring lack of trainable data, we create that data by growing it at scale.” — Andrei Georgescu</p><p><br></p><p>“If you're a technical founder, do something that is incredibly hard because the ability for you to do that thing will grant you much more leverage than creating what is otherwise a much more simple and generic business.” — Andrei Georgescu</p><p><br></p><p>“[With Vivodyne], we will enter a world of plenty where the development of new drugs against diseases becomes a far more successful, reliable, and predictive process, and we're able to make much safer and much more effective drugs just by virtue of being able to optimize that therapeutic on human tissues before giving it to people for the first time in-clinic.” — Andrei Georgescu</p><p><br></p><p><strong>Links:</strong></p><p><a href="https://ageorgescu.com/">Andrei Georgescu</a></p><p><a href="https://www.vivodyne.com/">Vivodyne</a></p><p><a href="https://www.linkedin.com/in/andrei-georgescu-phd/">Andrei Georgescu on LinkedIn</a></p><p><br><strong>Resources for Computer Vision Teams:</strong></p><p><a href="https://www.linkedin.com/in/hdcouture/">LinkedIn</a> – Connect with Heather.</p><p><a href="https://pixelscientia.com/newsletter/">Computer Vision Insights Newsletter</a> – A biweekly newsletter to help bring the latest machine learning and computer vision research to applications in people and 
planetary health.</p><p><a href="http://pixelscientia.com/services/strategy-session/">Computer Vision Strategy Session</a> – Not sure how to advance your computer vision project? Get unstuck with a clear set of next steps. Schedule a 1 hour strategy session now to advance your project.</p>]]>
      </itunes:summary>
      <itunes:keywords>machine learning, drug discovery, drug development</itunes:keywords>
      <itunes:explicit>No</itunes:explicit>
      <podcast:transcript url="https://share.transistor.fm/s/71001778/transcript.txt" type="text/plain"/>
    </item>
    <item>
      <title>Accelerating Regenerative Agriculture with Marie Coffin from CIBO Technologies</title>
      <itunes:episode>96</itunes:episode>
      <podcast:episode>96</podcast:episode>
      <itunes:title>Accelerating Regenerative Agriculture with Marie Coffin from CIBO Technologies</itunes:title>
      <itunes:episodeType>full</itunes:episodeType>
      <guid isPermaLink="false">105e064e-70d0-4508-99f6-a40ca9ceaa04</guid>
      <link>https://pixelscientia.com/podcast/accelerating-regenerative-agriculture-with-marie-coffin-from-cibo-technologies/</link>
      <description>
        <![CDATA[<p>Marie Coffin is the Vice President of Science and Modeling at CIBO Technologies, and she is with me today to discuss regenerative agriculture. Join us as we explore CIBO’s work to influence company carbon footprints across industries, and how machine learning supports this process through remote sensing. Delving deeper, Marie unpacks how satellite imagery integrates with their computer vision system for a more scalable solution. Next, we discuss obtaining and categorizing data in the US, exploring some of the obstacles that stem from privacy and data protection concerns. We touch on data quality and discuss the reason behind the geographical parameters they have applied to the work before Marie shares her approach to collaborating with external experts and agronomists. She offers her advice for startups in the tech space, emphasizing creating value for your clients over keeping up with trends, predicts the future endeavors that CIBO will focus on, and more. Thanks for listening! </p><p><br></p><p><strong>Key Points:</strong></p><ul><li>Introducing Marie Coffin and her background leading up to her role at CIBO Technologies.</li><li>CIBO’s work to influence company carbon footprints to improve agricultural sustainability.</li><li>The role of machine learning in this process: remote sensing.</li><li>What remote sensing is used for at CIBO.</li><li>How satellite imagery interacts with their computer vision system. </li><li>Gathering, labeling, and annotating data with a focus on the boundary of the field. </li><li>Obtaining this information through a farmer’s recording process. </li><li>Why their work is largely limited to the US at the moment. </li><li>Challenges related to privacy and data protection while working with training models.</li><li>Managing data quality issues.</li><li>Validating models within a geographical context. 
</li><li>Collaborating with domain experts and external agronomists to understand and validate their approaches.</li><li>How the seasonal nature of agriculture impacts the timing of reports and outputs. </li><li>Advice for tech startups; addressing trends, who to hire, and more.</li><li>Qualities Marie seeks in new hires. </li><li>Her prediction for CIBO’s growing impact in the next three to five years. </li></ul><p><br></p><p><strong>Quotes:</strong></p><p>“It’s pretty straightforward to estimate the carbon footprint of a single farmer’s field or even the carbon footprint of a whole farm, but, to make an impact, we need to be able to scale that across the landscape.” — Marie Coffin</p><p><br></p><p>“That is really the biggest challenge; it’s just getting enough data.” — Marie Coffin</p><p><br></p><p>“When you’re working in a really cutting-edge area, it’s tempting to sort of get caught up in the buzz of the new technology and lose sight of what the customer needs.” — Marie Coffin</p><p><br></p><p>“We need to not always be following the latest, greatest advance. We need to be going in a direction that’s going to really provide value.” — Marie Coffin</p><p><br></p><p><strong>Links:</strong></p><p><a href="https://www.cibotechnologies.com/">CIBO Technologies</a></p><p><a href="https://www.linkedin.com/in/marie-coffin/">Marie Coffin on LinkedIn</a></p><p><br><strong>Resources for Computer Vision Teams:</strong></p><p><a href="https://www.linkedin.com/in/hdcouture/">LinkedIn</a> – Connect with Heather.</p><p><a href="https://pixelscientia.com/newsletter/">Computer Vision Insights Newsletter</a> – A biweekly newsletter to help bring the latest machine learning and computer vision research to applications in people and planetary health.</p><p><a href="http://pixelscientia.com/services/strategy-session/">Computer Vision Strategy Session</a> – Not sure how to advance your computer vision project? Get unstuck with a clear set of next steps. 
Schedule a 1 hour strategy session now to advance your project.</p>]]>
      </description>
      <content:encoded>
        <![CDATA[<p>Marie Coffin is the Vice President of Science and Modeling at CIBO Technologies, and she is with me today to discuss regenerative agriculture. Join us as we explore CIBO’s work to influence company carbon footprints across industries, and how machine learning supports this process through remote sensing. Delving deeper, Marie unpacks how satellite imagery integrates with their computer vision system for a more scalable solution. Next, we discuss obtaining and categorizing data in the US, exploring some of the obstacles that stem from privacy and data protection concerns. We touch on data quality and discuss the reason behind the geographical parameters they have applied to the work before Marie shares her approach to collaborating with external experts and agronomists. She offers her advice for startups in the tech space, emphasizing creating value for your clients over keeping up with trends, predicts the future endeavors that CIBO will focus on, and more. Thanks for listening! </p><p><br></p><p><strong>Key Points:</strong></p><ul><li>Introducing Marie Coffin and her background leading up to her role at CIBO Technologies.</li><li>CIBO’s work to influence company carbon footprints to improve agricultural sustainability.</li><li>The role of machine learning in this process: remote sensing.</li><li>What remote sensing is used for at CIBO.</li><li>How satellite imagery interacts with their computer vision system. </li><li>Gathering, labeling, and annotating data with a focus on the boundary of the field. </li><li>Obtaining this information through a farmer’s recording process. </li><li>Why their work is largely limited to the US at the moment. </li><li>Challenges related to privacy and data protection while working with training models.</li><li>Managing data quality issues.</li><li>Validating models within a geographical context. 
</li><li>Collaborating with domain experts and external agronomists to understand and validate their approaches.</li><li>How the seasonal nature of agriculture impacts the timing of reports and outputs. </li><li>Advice for tech startups; addressing trends, who to hire, and more.</li><li>Qualities Marie seeks in new hires. </li><li>Her prediction for CIBO’s growing impact in the next three to five years. </li></ul><p><br></p><p><strong>Quotes:</strong></p><p>“It’s pretty straightforward to estimate the carbon footprint of a single farmer’s field or even the carbon footprint of a whole farm, but, to make an impact, we need to be able to scale that across the landscape.” — Marie Coffin</p><p><br></p><p>“That is really the biggest challenge; it’s just getting enough data.” — Marie Coffin</p><p><br></p><p>“When you’re working in a really cutting-edge area, it’s tempting to sort of get caught up in the buzz of the new technology and lose sight of what the customer needs.” — Marie Coffin</p><p><br></p><p>“We need to not always be following the latest, greatest advance. We need to be going in a direction that’s going to really provide value.” — Marie Coffin</p><p><br></p><p><strong>Links:</strong></p><p><a href="https://www.cibotechnologies.com/">CIBO Technologies</a></p><p><a href="https://www.linkedin.com/in/marie-coffin/">Marie Coffin on LinkedIn</a></p><p><br><strong>Resources for Computer Vision Teams:</strong></p><p><a href="https://www.linkedin.com/in/hdcouture/">LinkedIn</a> – Connect with Heather.</p><p><a href="https://pixelscientia.com/newsletter/">Computer Vision Insights Newsletter</a> – A biweekly newsletter to help bring the latest machine learning and computer vision research to applications in people and planetary health.</p><p><a href="http://pixelscientia.com/services/strategy-session/">Computer Vision Strategy Session</a> – Not sure how to advance your computer vision project? Get unstuck with a clear set of next steps. 
Schedule a 1 hour strategy session now to advance your project.</p>]]>
      </content:encoded>
      <pubDate>Mon, 19 Aug 2024 06:00:00 -0400</pubDate>
      <author>Heather D. Couture</author>
      <enclosure url="https://2.gum.fm/op3.dev/e/pdcn.co/e/pdst.fm/e/dts.podtrac.com/redirect.mp3/media.transistor.fm/5a29373a/d47ef4db.mp3" length="15484380" type="audio/mpeg"/>
      <itunes:author>Heather D. Couture</itunes:author>
      <itunes:image href="https://img.transistor.fm/9pJv5wCVHy51j9fxEmxUht41ehM4insAVwy47jkS1h8/rs:fill:0:0:1/w:1400/h:1400/q:60/mb:500000/aHR0cHM6Ly9pbWct/dXBsb2FkLXByb2R1/Y3Rpb24udHJhbnNp/c3Rvci5mbS9hM2E4/ZmU5YTg1OWU1MTg5/MjQzOTMwZTNhNWZl/ODNjMS5qcGVn.jpg"/>
      <itunes:duration>962</itunes:duration>
      <itunes:summary>
        <![CDATA[<p>Marie Coffin is the Vice President of Science and Modeling at CIBO Technologies, and she is with me today to discuss regenerative agriculture. Join us as we explore CIBO’s work to influence company carbon footprints across industries, and how machine learning supports this process through remote sensing. Delving deeper, Marie unpacks how satellite imagery integrates with their computer vision system for a more scalable solution. Next, we discuss obtaining and categorizing data in the US, exploring some of the obstacles that stem from privacy and data protection concerns. We touch on data quality and discuss the reason behind the geographical parameters they have applied to the work before Marie shares her approach to collaborating with external experts and agronomists. She offers her advice for startups in the tech space, emphasizing creating value for your clients over keeping up with trends, predicts the future endeavors that CIBO will focus on, and more. Thanks for listening! </p><p><br></p><p><strong>Key Points:</strong></p><ul><li>Introducing Marie Coffin and her background leading up to her role at CIBO Technologies.</li><li>CIBO’s work to influence company carbon footprints to improve agricultural sustainability.</li><li>The role of machine learning in this process: remote sensing.</li><li>What remote sensing is used for at CIBO.</li><li>How satellite imagery interacts with their computer vision system. </li><li>Gathering, labeling, and annotating data with a focus on the boundary of the field. </li><li>Obtaining this information through a farmer’s recording process. </li><li>Why their work is largely limited to the US at the moment. </li><li>Challenges related to privacy and data protection while working with training models.</li><li>Managing data quality issues.</li><li>Validating models within a geographical context. 
</li><li>Collaborating with domain experts and external agronomists to understand and validate their approaches.</li><li>How the seasonal nature of agriculture impacts the timing of reports and outputs. </li><li>Advice for tech startups; addressing trends, who to hire, and more.</li><li>Qualities Marie seeks in new hires. </li><li>Her prediction for CIBO’s growing impact in the next three to five years. </li></ul><p><br></p><p><strong>Quotes:</strong></p><p>“It’s pretty straightforward to estimate the carbon footprint of a single farmer’s field or even the carbon footprint of a whole farm, but, to make an impact, we need to be able to scale that across the landscape.” — Marie Coffin</p><p><br></p><p>“That is really the biggest challenge; it’s just getting enough data.” — Marie Coffin</p><p><br></p><p>“When you’re working in a really cutting-edge area, it’s tempting to sort of get caught up in the buzz of the new technology and lose sight of what the customer needs.” — Marie Coffin</p><p><br></p><p>“We need to not always be following the latest, greatest advance. We need to be going in a direction that’s going to really provide value.” — Marie Coffin</p><p><br></p><p><strong>Links:</strong></p><p><a href="https://www.cibotechnologies.com/">CIBO Technologies</a></p><p><a href="https://www.linkedin.com/in/marie-coffin/">Marie Coffin on LinkedIn</a></p><p><br><strong>Resources for Computer Vision Teams:</strong></p><p><a href="https://www.linkedin.com/in/hdcouture/">LinkedIn</a> – Connect with Heather.</p><p><a href="https://pixelscientia.com/newsletter/">Computer Vision Insights Newsletter</a> – A biweekly newsletter to help bring the latest machine learning and computer vision research to applications in people and planetary health.</p><p><a href="http://pixelscientia.com/services/strategy-session/">Computer Vision Strategy Session</a> – Not sure how to advance your computer vision project? Get unstuck with a clear set of next steps. 
Schedule a 1 hour strategy session now to advance your project.</p>]]>
      </itunes:summary>
      <itunes:keywords>machine learning, agriculture, regenerative agriculture</itunes:keywords>
      <itunes:explicit>No</itunes:explicit>
      <podcast:transcript url="https://share.transistor.fm/s/5a29373a/transcript.txt" type="text/plain"/>
    </item>
    <item>
      <title>Measuring Biodiversity Using Insects with Mads Fogtmann from Fauna Photonics</title>
      <itunes:episode>95</itunes:episode>
      <podcast:episode>95</podcast:episode>
      <itunes:title>Measuring Biodiversity Using Insects with Mads Fogtmann from Fauna Photonics</itunes:title>
      <itunes:episodeType>full</itunes:episodeType>
      <guid isPermaLink="false">fafe7429-0681-44c2-9d5f-7a8e731e922f</guid>
      <link>https://pixelscientia.com/podcast/measuring-biodiversity-using-insects-with-mads-fogtmann-from-fauna-photonics/</link>
      <description>
        <![CDATA[<p>What if technology could be the key to averting a biodiversity crisis? Today, I explore this possibility with Mads Fogtmann, Chief Data Officer of FaunaPhotonics, as we discuss their groundbreaking approach to biodiversity monitoring. I talk with Mads about the looming biodiversity crisis, the innovative solutions his team is developing to address the urgent need for scalable biodiversity monitoring, and the central role that humans have to play in all this. Find out how the FaunaPhotonics platform is employing advanced sensing technology and machine learning to protect ecosystems, why insects are such useful proxies for monitoring ecosystem health, and their successful partnerships with other domain experts and researchers. Our conversation also covers the broader implications of biodiversity loss, the role of public awareness in conservation, and the future of biodiversity monitoring. Join us for a comprehensive and insightful discussion on how technology can help safeguard our planet's future and ensure the stability of natural and human systems alike!</p><p><br></p><p><strong>Key Points:</strong></p><ul><li>Some background on Mads and his transition from academia to the private sector.</li><li>The FaunaPhotonics platform and how it monitors biodiversity.</li><li>An overview of the biodiversity crisis and the urgent need to address it.</li><li>Understanding our connection to, and dependence on, nature.</li><li>The risks that the biodiversity crisis poses for supply chains.</li><li>FaunaPhotonics’ role in measuring the biodiversity crisis: why this protects ecosystems.</li><li>Why insects are the best available proxy for measuring ecosystem health.</li><li>How sensing technology and machine learning are utilized by FaunaPhotonics.</li><li>Case studies showcasing the impact of FaunaPhotonics' technology.</li><li>Future directions and innovations in biodiversity monitoring.</li><li>Key challenges faced in developing and deploying biodiversity 
monitoring technology.</li><li>FaunaPhotonics’ collaboration with other domain experts and researchers in the field.</li><li>Why there is such an urgent need for scalable biodiversity monitoring.</li><li>The importance of public awareness and education in addressing the biodiversity crisis.</li><li>Mads’ advice to leaders of other AI-powered startups and the future of FaunaPhotonics.</li></ul><p><br></p><p><strong>Quotes:</strong></p><p><br></p><p>“The clothes we wear, the food we eat, the water we drink, the material we use to build houses: everything comes from nature. And right now, we are destroying that foundation rapidly.” — Mads Fogtmann</p><p><br></p><p>“I think it’s important that we become more aware that we are an integral part of nature.” — Mads Fogtmann</p><p><br></p><p>“If you can’t measure it, then how can you protect the rights? – [We come with the solution] that allows them to measure [the impact on biodiversity] so they can protect it. We do this by using insect sensing. 
The reason we do this is that insects are so fundamental to the ecosystem.” — Mads Fogtmann</p><p>“Insects are the best proxy that you can have for actually measuring the health of [an] ecosystem.” — Mads Fogtmann</p><p><br></p><p>“There’s a huge need and an interest in ‘how we can actually scale biodiversity monitoring to kind of help us understand what’s going on with nature at the moment.’” — Mads Fogtmann</p><p><br></p><p><strong>Links:</strong></p><p><a href="https://www.linkedin.com/in/madsfogtmannhansen/?original_referer=https://duckduckgo.com/&amp;originalSubdomain=dk">Mads Fogtmann on LinkedIn<br></a><a href="https://faunaphotonics.com/">FaunaPhotonics</a></p><p><a href="https://www.linkedin.com/company/faunaphotonics/">FaunaPhotonics on LinkedIn</a></p><p><br><strong>Resources for Computer Vision Teams:</strong></p><p><a href="https://www.linkedin.com/in/hdcouture/">LinkedIn</a> – Connect with Heather.</p><p><a href="https://pixelscientia.com/newsletter/">Computer Vision Insights Newsletter</a> – A biweekly newsletter to help bring the latest machine learning and computer vision research to applications in people and planetary health.</p><p><a href="http://pixelscientia.com/services/strategy-session/">Computer Vision Strategy Session</a> – Not sure how to advance your computer vision project? Get unstuck with a clear set of next steps. Schedule a 1 hour strategy session now to advance your project.</p>]]>
      </description>
      <content:encoded>
        <![CDATA[<p>What if technology could be the key to averting a biodiversity crisis? Today, I explore this possibility with Mads Fogtmann, Chief Data Officer of FaunaPhotonics, as we discuss their groundbreaking approach to biodiversity monitoring. I talk with Mads about the looming biodiversity crisis, the innovative solutions his team is developing to address the urgent need for scalable biodiversity monitoring, and the central role that humans have to play in all this. Find out how the FaunaPhotonics platform is employing advanced sensing technology and machine learning to protect ecosystems, why insects are such useful proxies for monitoring ecosystem health, and their successful partnerships with other domain experts and researchers. Our conversation also covers the broader implications of biodiversity loss, the role of public awareness in conservation, and the future of biodiversity monitoring. Join us for a comprehensive and insightful discussion on how technology can help safeguard our planet's future and ensure the stability of natural and human systems alike!</p><p><br></p><p><strong>Key Points:</strong></p><ul><li>Some background on Mads and his transition from academia to the private sector.</li><li>The FaunaPhotonics platform and how it monitors biodiversity.</li><li>An overview of the biodiversity crisis and the urgent need to address it.</li><li>Understanding our connection to, and dependence on, nature.</li><li>The risks that the biodiversity crisis poses for supply chains.</li><li>FaunaPhotonics’ role in measuring the biodiversity crisis: why this protects ecosystems.</li><li>Why insects are the best available proxy for measuring ecosystem health.</li><li>How sensing technology and machine learning are utilized by FaunaPhotonics.</li><li>Case studies showcasing the impact of FaunaPhotonics' technology.</li><li>Future directions and innovations in biodiversity monitoring.</li><li>Key challenges faced in developing and deploying biodiversity 
monitoring technology.</li><li>FaunaPhotonics’ collaboration with other domain experts and researchers in the field.</li><li>Why there is such an urgent need for scalable biodiversity monitoring.</li><li>The importance of public awareness and education in addressing the biodiversity crisis.</li><li>Mads’ advice to leaders of other AI-powered startups and the future of FaunaPhotonics.</li></ul><p><br></p><p><strong>Quotes:</strong></p><p><br></p><p>“The clothes we wear, the food we eat, the water we drink, the material we use to build houses: everything comes from nature. And right now, we are destroying that foundation rapidly.” — Mads Fogtmann</p><p><br></p><p>“I think it’s important that we become more aware that we are an integral part of nature.” — Mads Fogtmann</p><p><br></p><p>“If you can’t measure it, then how can you protect the rights? – [We come with the solution] that allows them to measure [the impact on biodiversity] so they can protect it. We do this by using insect sensing. 
The reason we do this is that insects are so fundamental to the ecosystem.” — Mads Fogtmann</p><p>“Insects are the best proxy that you can have for actually measuring the health of [an] ecosystem.” — Mads Fogtmann</p><p><br></p><p>“There’s a huge need and an interest in ‘how we can actually scale biodiversity monitoring to kind of help us understand what’s going on with nature at the moment.’” — Mads Fogtmann</p><p><br></p><p><strong>Links:</strong></p><p><a href="https://www.linkedin.com/in/madsfogtmannhansen/?original_referer=https://duckduckgo.com/&amp;originalSubdomain=dk">Mads Fogtmann on LinkedIn<br></a><a href="https://faunaphotonics.com/">FaunaPhotonics</a></p><p><a href="https://www.linkedin.com/company/faunaphotonics/">FaunaPhotonics on LinkedIn</a></p><p><br><strong>Resources for Computer Vision Teams:</strong></p><p><a href="https://www.linkedin.com/in/hdcouture/">LinkedIn</a> – Connect with Heather.</p><p><a href="https://pixelscientia.com/newsletter/">Computer Vision Insights Newsletter</a> – A biweekly newsletter to help bring the latest machine learning and computer vision research to applications in people and planetary health.</p><p><a href="http://pixelscientia.com/services/strategy-session/">Computer Vision Strategy Session</a> – Not sure how to advance your computer vision project? Get unstuck with a clear set of next steps. Schedule a 1 hour strategy session now to advance your project.</p>]]>
      </content:encoded>
      <pubDate>Mon, 12 Aug 2024 06:00:00 -0400</pubDate>
      <author>Heather D. Couture</author>
      <enclosure url="https://2.gum.fm/op3.dev/e/pdcn.co/e/pdst.fm/e/dts.podtrac.com/redirect.mp3/media.transistor.fm/50d9fc5b/068e3919.mp3" length="19844208" type="audio/mpeg"/>
      <itunes:author>Heather D. Couture</itunes:author>
      <itunes:image href="https://img.transistor.fm/Jw5uwWWLed4pgt01DIzne61Csb8rOVP5BXC-xQXJ5Cg/rs:fill:0:0:1/w:1400/h:1400/q:60/mb:500000/aHR0cHM6Ly9pbWct/dXBsb2FkLXByb2R1/Y3Rpb24udHJhbnNp/c3Rvci5mbS82YmJj/N2VjZDk2MTFmNmMw/OTRlNWNlOGUzODM3/N2Q3MC5wbmc.jpg"/>
      <itunes:duration>1234</itunes:duration>
      <itunes:summary>
        <![CDATA[<p>What if technology could be the key to averting a biodiversity crisis? Today, I explore this possibility with Mads Fogtmann, Chief Data Officer of FaunaPhotonics, as we discuss their groundbreaking approach to biodiversity monitoring. I talk with Mads about the looming biodiversity crisis, the innovative solutions his team is developing to address the urgent need for scalable biodiversity monitoring, and the central role that humans have to play in all this. Find out how the FaunaPhotonics platform is employing advanced sensing technology and machine learning to protect ecosystems, why insects are such useful proxies for monitoring ecosystem health, and their successful partnerships with other domain experts and researchers. Our conversation also covers the broader implications of biodiversity loss, the role of public awareness in conservation, and the future of biodiversity monitoring. Join us for a comprehensive and insightful discussion on how technology can help safeguard our planet's future and ensure the stability of natural and human systems alike!</p><p><br></p><p><strong>Key Points:</strong></p><ul><li>Some background on Mads and his transition from academia to the private sector.</li><li>The FaunaPhotonics platform and how it monitors biodiversity.</li><li>An overview of the biodiversity crisis and the urgent need to address it.</li><li>Understanding our connection to, and dependence on, nature.</li><li>The risks that the biodiversity crisis poses for supply chains.</li><li>FaunaPhotonics’ role in measuring the biodiversity crisis: why this protects ecosystems.</li><li>Why insects are the best available proxy for measuring ecosystem health.</li><li>How sensing technology and machine learning are utilized by FaunaPhotonics.</li><li>Case studies showcasing the impact of FaunaPhotonics' technology.</li><li>Future directions and innovations in biodiversity monitoring.</li><li>Key challenges faced in developing and deploying biodiversity 
monitoring technology.</li><li>FaunaPhotonics’ collaboration with other domain experts and researchers in the field.</li><li>Why there is such an urgent need for scalable biodiversity monitoring.</li><li>The importance of public awareness and education in addressing the biodiversity crisis.</li><li>Mads’ advice to leaders of other AI-powered startups and the future of FaunaPhotonics.</li></ul><p><br></p><p><strong>Quotes:</strong></p><p><br></p><p>“The clothes we wear, the food we eat, the water we drink, the material we use to build houses: everything comes from nature. And right now, we are destroying that foundation rapidly.” — Mads Fogtmann</p><p><br></p><p>“I think it’s important that we become more aware that we are an integral part of nature.” — Mads Fogtmann</p><p><br></p><p>“If you can’t measure it, then how can you protect the rights? – [We come with the solution] that allows them to measure [the impact on biodiversity] so they can protect it. We do this by using insect sensing. 
The reason we do this is that insects are so fundamental to the ecosystem.” — Mads Fogtmann</p><p>“Insects are the best proxy that you can have for actually measuring the health of [an] ecosystem.” — Mads Fogtmann</p><p><br></p><p>“There’s a huge need and an interest in ‘how we can actually scale biodiversity monitoring to kind of help us understand what’s going on with nature at the moment.’” — Mads Fogtmann</p><p><br></p><p><strong>Links:</strong></p><p><a href="https://www.linkedin.com/in/madsfogtmannhansen/?original_referer=https://duckduckgo.com/&amp;originalSubdomain=dk">Mads Fogtmann on LinkedIn<br></a><a href="https://faunaphotonics.com/">FaunaPhotonics</a></p><p><a href="https://www.linkedin.com/company/faunaphotonics/">FaunaPhotonics on LinkedIn</a></p><p><br><strong>Resources for Computer Vision Teams:</strong></p><p><a href="https://www.linkedin.com/in/hdcouture/">LinkedIn</a> – Connect with Heather.</p><p><a href="https://pixelscientia.com/newsletter/">Computer Vision Insights Newsletter</a> – A biweekly newsletter to help bring the latest machine learning and computer vision research to applications in people and planetary health.</p><p><a href="http://pixelscientia.com/services/strategy-session/">Computer Vision Strategy Session</a> – Not sure how to advance your computer vision project? Get unstuck with a clear set of next steps. Schedule a 1 hour strategy session now to advance your project.</p>]]>
      </itunes:summary>
      <itunes:keywords>machine learning, biodiversity</itunes:keywords>
      <itunes:explicit>No</itunes:explicit>
      <podcast:transcript url="https://share.transistor.fm/s/50d9fc5b/transcript.txt" type="text/plain"/>
    </item>
    <item>
      <title>Optimizing Manufacturing with Berk Birand from Fero Labs</title>
      <itunes:episode>94</itunes:episode>
      <podcast:episode>94</podcast:episode>
      <itunes:title>Optimizing Manufacturing with Berk Birand from Fero Labs</itunes:title>
      <itunes:episodeType>full</itunes:episodeType>
      <guid isPermaLink="false">043f86ac-86af-41ae-9bc3-5d6376395f42</guid>
      <link>https://pixelscientia.com/podcast/optimizing-manufacturing-with-berk-birand-from-fero-labs/</link>
      <description>
        <![CDATA[<p>Manufacturing is a fundamental part of our economy. Unfortunately, a huge swath of the industry is still dependent on outdated methods, adversely impacting our environment. To address these challenges, one company is harnessing the power of AI to transform traditional manufacturing, driving unprecedented efficiency and sustainability in the industry. Joining me today is Berk Birand, co-founder and CEO of Fero Labs, to unpack how AI is optimizing the manufacturing sector.</p><p>Tuning in, you'll learn all about Fero Labs' innovative software and how it’s empowering engineers in industries like steel and chemicals to harness machine learning, drastically reducing waste and energy consumption. We discuss how their AI analyzes historical production data to ensure factories operate at peak performance and how this is boosting sustainability and profitability. Our conversation also unpacks the critical role of explainable AI in building trust within the industrial sector, where precision and reliability are essential. 
Tune in to discover how Fero Labs is paving the way for a greener industrial future!</p><p><br></p><p><strong>Key Points:</strong></p><ul><li>Berk Birand’s education and career background.</li><li>How he co-founded Fero Labs with his business partner.</li><li>An overview of Fero Labs’ AI software.</li><li>Fero Labs’ role in reducing raw material waste in the steel industry.</li><li>How they have helped improve energy efficiency in chemical manufacturing.</li><li>Data analysis and how their software provides recommendations for efficient operations.</li><li>Understanding the high stakes involved in manufacturing processes.</li><li>Why AI explainability is crucial in the industrial sector.</li><li>How they are building explainable models that engineers can trust and understand.</li><li>Why now is the right time to build this technology.</li><li>His advice to AI-powered startups: seriously consider the cost of a bad prediction.</li><li>Fero Labs’ long-term vision to achieve a more circular and sustainable industrial sector.</li></ul><p><br></p><p><strong>Quotes:</strong></p><p>"One of our largest customers was able to reduce the waste of raw materials, about a million pounds just throughout last year, by using our software AI system." — Berk Birand</p><p><br></p><p>"We think AI will play a key role in the transition to a green economy." — Berk Birand</p><p><br>"The best people to be solving these types of challenges, ultimately, are the engineers that work at the plants. The engineers that have the most domain expertise." — Berk Birand</p><p><br></p><p>"In an environment like this, an engineer in a factory would just not want to use a software that they don't trust, because ultimately, it's their job that's on the line." 
— Berk Birand</p><p><br>“With the new drive towards building an industrial sector that is more circular and more sustainable, there's incredible potential to optimize not just an individual factory, but beyond that, to optimize the entire supply chain by optimizing factories jointly.” — Berk Birand</p><p><strong>Links:</strong></p><p><a href="https://www.linkedin.com/in/berkbirand/">Berk Birand on LinkedIn</a></p><p><a href="https://www.ferolabs.com/">Fero Labs</a></p><p><br><strong>Resources for Computer Vision Teams:</strong></p><p><a href="https://www.linkedin.com/in/hdcouture/">LinkedIn</a> – Connect with Heather.</p><p><a href="https://pixelscientia.com/newsletter/">Computer Vision Insights Newsletter</a> – A biweekly newsletter to help bring the latest machine learning and computer vision research to applications in people and planetary health.</p><p><a href="http://pixelscientia.com/services/strategy-session/">Computer Vision Strategy Session</a> – Not sure how to advance your computer vision project? Get unstuck with a clear set of next steps. Schedule a 1 hour strategy session now to advance your project.</p>]]>
      </description>
      <content:encoded>
        <![CDATA[<p>Manufacturing is a fundamental part of our economy. Unfortunately, a huge swath of the industry is still dependent on outdated methods, adversely impacting our environment. To address these challenges, one company is harnessing the power of AI to transform traditional manufacturing, driving unprecedented efficiency and sustainability in the industry. Joining me today is Berk Birand, co-founder and CEO of Fero Labs, to unpack how AI is optimizing the manufacturing sector.</p><p>Tuning in, you'll learn all about Fero Labs' innovative software and how it’s empowering engineers in industries like steel and chemicals to harness machine learning, drastically reducing waste and energy consumption. We discuss how their AI analyzes historical production data to ensure factories operate at peak performance and how this is boosting sustainability and profitability. Our conversation also unpacks the critical role of explainable AI in building trust within the industrial sector, where precision and reliability are essential. 
Tune in to discover how Fero Labs is paving the way for a greener industrial future!</p><p><br></p><p><strong>Key Points:</strong></p><ul><li>Berk Birand’s education and career background.</li><li>How he co-founded Fero Labs with his business partner.</li><li>An overview of Fero Labs’ AI software.</li><li>Fero Labs’ role in reducing raw material waste in the steel industry.</li><li>How they have helped improve energy efficiency in chemical manufacturing.</li><li>Data analysis and how their software provides recommendations for efficient operations.</li><li>Understanding the high stakes involved in manufacturing processes.</li><li>Why AI explainability is crucial in the industrial sector.</li><li>How they are building explainable models that engineers can trust and understand.</li><li>Why now is the right time to build this technology.</li><li>His advice to AI-powered startups: seriously consider the cost of a bad prediction.</li><li>Fero Labs’ long-term vision to achieve a more circular and sustainable industrial sector.</li></ul><p><br></p><p><strong>Quotes:</strong></p><p>"One of our largest customers was able to reduce the waste of raw materials, about a million pounds just throughout last year, by using our software AI system." — Berk Birand</p><p><br></p><p>"We think AI will play a key role in the transition to a green economy." — Berk Birand</p><p><br>"The best people to be solving these types of challenges, ultimately, are the engineers that work at the plants. The engineers that have the most domain expertise." — Berk Birand</p><p><br></p><p>"In an environment like this, an engineer in a factory would just not want to use a software that they don't trust, because ultimately, it's their job that's on the line." 
— Berk Birand</p><p><br>“With the new drive towards building an industrial sector that is more circular and more sustainable, there's incredible potential to optimize not just an individual factory, but beyond that, to optimize the entire supply chain by optimizing factories jointly.” — Berk Birand</p><p><strong>Links:</strong></p><p><a href="https://www.linkedin.com/in/berkbirand/">Berk Birand on LinkedIn</a></p><p><a href="https://www.ferolabs.com/">Fero Labs</a></p><p><br><strong>Resources for Computer Vision Teams:</strong></p><p><a href="https://www.linkedin.com/in/hdcouture/">LinkedIn</a> – Connect with Heather.</p><p><a href="https://pixelscientia.com/newsletter/">Computer Vision Insights Newsletter</a> – A biweekly newsletter to help bring the latest machine learning and computer vision research to applications in people and planetary health.</p><p><a href="http://pixelscientia.com/services/strategy-session/">Computer Vision Strategy Session</a> – Not sure how to advance your computer vision project? Get unstuck with a clear set of next steps. Schedule a 1 hour strategy session now to advance your project.</p>]]>
      </content:encoded>
      <pubDate>Mon, 05 Aug 2024 06:00:00 -0400</pubDate>
      <author>Heather D. Couture</author>
      <enclosure url="https://2.gum.fm/op3.dev/e/pdcn.co/e/pdst.fm/e/dts.podtrac.com/redirect.mp3/media.transistor.fm/f4f10771/c0132214.mp3" length="20088968" type="audio/mpeg"/>
      <itunes:author>Heather D. Couture</itunes:author>
      <itunes:image href="https://img.transistor.fm/-PXrmTlkfvPAPfuUw-oo1HSlN18ylgtfhD2CuP8Mu40/rs:fill:0:0:1/w:1400/h:1400/q:60/mb:500000/aHR0cHM6Ly9pbWct/dXBsb2FkLXByb2R1/Y3Rpb24udHJhbnNp/c3Rvci5mbS82NWQw/OTY3OTZmMjhjMzA0/NWE3NzY3NzM4ZTli/YTUxMC5wbmc.jpg"/>
      <itunes:duration>1250</itunes:duration>
      <itunes:summary>
        <![CDATA[<p>Manufacturing is a fundamental part of our economy. Unfortunately, a huge swath of the industry is still dependent on outdated methods, adversely impacting our environment. To address these challenges, one company is harnessing the power of AI to transform traditional manufacturing, driving unprecedented efficiency and sustainability in the industry. Joining me today is Berk Birand, co-founder and CEO of Fero Labs, to unpack how AI is optimizing the manufacturing sector.</p><p>Tuning in, you'll learn all about Fero Labs' innovative software and how it’s empowering engineers in industries like steel and chemicals to harness machine learning, drastically reducing waste and energy consumption. We discuss how their AI analyzes historical production data to ensure factories operate at peak performance and how this is boosting sustainability and profitability. Our conversation also unpacks the critical role of explainable AI in building trust within the industrial sector, where precision and reliability are essential. 
Tune in to discover how Fero Labs is paving the way for a greener industrial future!</p><p><br></p><p><strong>Key Points:</strong></p><ul><li>Berk Birand’s education and career background.</li><li>How he co-founded Fero Labs with his business partner.</li><li>An overview of Fero Labs’ AI software.</li><li>Fero Labs’ role in reducing raw material waste in the steel industry.</li><li>How they have helped improve energy efficiency in chemical manufacturing.</li><li>Data analysis and how their software provides recommendations for efficient operations.</li><li>Understanding the high stakes involved in manufacturing processes.</li><li>Why AI explainability is crucial in the industrial sector.</li><li>How they are building explainable models that engineers can trust and understand.</li><li>Why now is the right time to build this technology.</li><li>His advice to AI-powered startups: seriously consider the cost of a bad prediction.</li><li>Fero Labs’ long-term vision to achieve a more circular and sustainable industrial sector.</li></ul><p><br></p><p><strong>Quotes:</strong></p><p>"One of our largest customers was able to reduce the waste of raw materials, about a million pounds just throughout last year, by using our software AI system." — Berk Birand</p><p><br></p><p>"We think AI will play a key role in the transition to a green economy." — Berk Birand</p><p><br>"The best people to be solving these types of challenges, ultimately, are the engineers that work at the plants. The engineers that have the most domain expertise." — Berk Birand</p><p><br></p><p>"In an environment like this, an engineer in a factory would just not want to use a software that they don't trust, because ultimately, it's their job that's on the line." 
— Berk Birand</p><p><br>“With the new drive towards building an industrial sector that is more circular and more sustainable, there's incredible potential to optimize not just an individual factory, but beyond that, to optimize the entire supply chain by optimizing factories jointly.” — Berk Birand</p><p><strong>Links:</strong></p><p><a href="https://www.linkedin.com/in/berkbirand/">Berk Birand on LinkedIn</a></p><p><a href="https://www.ferolabs.com/">Fero Labs</a></p><p><br><strong>Resources for Computer Vision Teams:</strong></p><p><a href="https://www.linkedin.com/in/hdcouture/">LinkedIn</a> – Connect with Heather.</p><p><a href="https://pixelscientia.com/newsletter/">Computer Vision Insights Newsletter</a> – A biweekly newsletter to help bring the latest machine learning and computer vision research to applications in people and planetary health.</p><p><a href="http://pixelscientia.com/services/strategy-session/">Computer Vision Strategy Session</a> – Not sure how to advance your computer vision project? Get unstuck with a clear set of next steps. Schedule a 1 hour strategy session now to advance your project.</p>]]>
      </itunes:summary>
      <itunes:keywords>machine learning, manufacturing</itunes:keywords>
      <itunes:explicit>No</itunes:explicit>
      <podcast:transcript url="https://share.transistor.fm/s/f4f10771/transcript.txt" type="text/plain"/>
    </item>
    <item>
      <title>More Successful IVF with Daniella Gilboa from AIVF</title>
      <itunes:episode>93</itunes:episode>
      <podcast:episode>93</podcast:episode>
      <itunes:title>More Successful IVF with Daniella Gilboa from AIVF</itunes:title>
      <itunes:episodeType>full</itunes:episodeType>
      <guid isPermaLink="false">f0762659-6224-43b3-8bce-33fdf92240da</guid>
      <link>https://pixelscientia.com/podcast/more-successful-ivf-with-daniella-gilboa-from-aivf/</link>
      <description>
        <![CDATA[<p>In this episode of Impact AI, we delve into the transformative impact of AI on in-vitro fertilization (IVF) with Daniella Gilboa, co-founder and CEO of AIVF, a startup that develops AI-powered IVF solutions to help increase the certainty of a successful journey to parenthood. Join me as Daniella shares her mission to democratize fertility care and offers insight into AIVF’s proprietary technology that delivers reliable, objective, and data-driven IVF outcomes for clinicians, embryologists, and patients. We explore the role and challenges of machine learning at AIVF, strategies for validating AI models in clinical practice, and the current demand for AI-powered IVF solutions. We also discuss the metrics used to measure the impact of AIVF's technology, Daniella’s advice for other AI-powered startup leaders, and her vision for the future. Tune in to gain valuable insights into the future of fertility care and find out how AI is making IVF more effective and accessible!</p><p><br></p><p><strong>Key Points:</strong></p><ul><li>How Daniella came to understand the epidemiology and data aspects of fertility.</li><li>What AIVF does and why it’s so important for both patients and clinicians.</li><li>The role of machine learning at AIVF and the challenges their models encounter. 
</li><li>AIVF’s strategy for validating their models and translating KPIs into clinical settings.</li><li>The value of explainability to empower embryologists to use AI as a tool.</li><li>Daniella’s definition of computational embryology, assisted by machine learning.</li><li>Why now is the right time for AI-powered IVF solutions.</li><li>Metrics that AIVF uses to measure the impact of their technology.</li><li>Daniella’s advice for leaders of AI-powered startups and her vision for the future.</li></ul><p><br></p><p><strong>Quotes:</strong></p><p>“We showed that if you use AI as a tool for the embryologist – [it] increased the success rates – The decision-making is faster, more accurate. You freeze less embryos because each embryo you freeze is accurate – It changes the way the lab works and it optimizes everything.” — Daniella Gilboa</p><p><br></p><p>“The way you interact with the patient and consult the journey ahead is changing. It’s more accurate. It allows you to make more informed decisions. This is the right way of doing medicine. 
It needs to be data-driven rather than subjective human analysis.” — Daniella Gilboa</p><p><br></p><p>“AIVF needs to become the standard of care.” — Daniella Gilboa</p><p><br></p><p><strong>Links:</strong></p><p><a href="https://aivf.co/">AIVF</a></p><p><a href="https://www.linkedin.com/in/daniella-gilboa-20295713/">Daniella Gilboa on LinkedIn</a></p><p><a href="https://x.com/gilboadaniella">Daniella Gilboa on X</a></p><p><br><strong>Resources for Computer Vision Teams:</strong></p><p><a href="https://www.linkedin.com/in/hdcouture/">LinkedIn</a> – Connect with Heather.</p><p><a href="https://pixelscientia.com/newsletter/">Computer Vision Insights Newsletter</a> – A biweekly newsletter to help bring the latest machine learning and computer vision research to applications in people and planetary health.</p><p><a href="http://pixelscientia.com/services/strategy-session/">Computer Vision Strategy Session</a> – Not sure how to advance your computer vision project? Get unstuck with a clear set of next steps. Schedule a 1 hour strategy session now to advance your project.</p>]]>
      </description>
      <content:encoded>
        <![CDATA[<p>In this episode of Impact AI, we delve into the transformative impact of AI on in-vitro fertilization (IVF) with Daniella Gilboa, co-founder and CEO of AIVF, a startup that develops AI-powered IVF solutions to help increase the certainty of a successful journey to parenthood. Join me as Daniella shares her mission to democratize fertility care and offers insight into AIVF’s proprietary technology that delivers reliable, objective, and data-driven IVF outcomes for clinicians, embryologists, and patients. We explore the role and challenges of machine learning at AIVF, strategies for validating AI models in clinical practice, and the current demand for AI-powered IVF solutions. We also discuss the metrics used to measure the impact of AIVF's technology, Daniella’s advice for other AI-powered startup leaders, and her vision for the future. Tune in to gain valuable insights into the future of fertility care and find out how AI is making IVF more effective and accessible!</p><p><br></p><p><strong>Key Points:</strong></p><ul><li>How Daniella came to understand the epidemiology and data aspects of fertility.</li><li>What AIVF does and why it’s so important for both patients and clinicians.</li><li>The role of machine learning at AIVF and the challenges their models encounter. 
</li><li>AIVF’s strategy for validating their models and translating KPIs into clinical settings.</li><li>The value of explainability to empower embryologists to use AI as a tool.</li><li>Daniella’s definition of computational embryology, assisted by machine learning.</li><li>Why now is the right time for AI-powered IVF solutions.</li><li>Metrics that AIVF uses to measure the impact of their technology.</li><li>Daniella’s advice for leaders of AI-powered startups and her vision for the future.</li></ul><p><br></p><p><strong>Quotes:</strong></p><p>“We showed that if you use AI as a tool for the embryologist – [it] increased the success rates – The decision-making is faster, more accurate. You freeze less embryos because each embryo you freeze is accurate – It changes the way the lab works and it optimizes everything.” — Daniella Gilboa</p><p><br></p><p>“The way you interact with the patient and consult the journey ahead is changing. It’s more accurate. It allows you to make more informed decisions. This is the right way of doing medicine. 
It needs to be data-driven rather than subjective human analysis.” — Daniella Gilboa</p><p><br></p><p>“AIVF needs to become the standard of care.” — Daniella Gilboa</p><p><br></p><p><strong>Links:</strong></p><p><a href="https://aivf.co/">AIVF</a></p><p><a href="https://www.linkedin.com/in/daniella-gilboa-20295713/">Daniella Gilboa on LinkedIn</a></p><p><a href="https://x.com/gilboadaniella">Daniella Gilboa on X</a></p><p><br><strong>Resources for Computer Vision Teams:</strong></p><p><a href="https://www.linkedin.com/in/hdcouture/">LinkedIn</a> – Connect with Heather.</p><p><a href="https://pixelscientia.com/newsletter/">Computer Vision Insights Newsletter</a> – A biweekly newsletter to help bring the latest machine learning and computer vision research to applications in people and planetary health.</p><p><a href="http://pixelscientia.com/services/strategy-session/">Computer Vision Strategy Session</a> – Not sure how to advance your computer vision project? Get unstuck with a clear set of next steps. Schedule a 1 hour strategy session now to advance your project.</p>]]>
      </content:encoded>
      <pubDate>Mon, 29 Jul 2024 06:00:00 -0400</pubDate>
      <author>Heather D. Couture</author>
      <enclosure url="https://2.gum.fm/op3.dev/e/pdcn.co/e/pdst.fm/e/dts.podtrac.com/redirect.mp3/media.transistor.fm/d7ecf55d/4ac9b6ab.mp3" length="39825616" type="audio/mpeg"/>
      <itunes:author>Heather D. Couture</itunes:author>
      <itunes:image href="https://img.transistor.fm/l7f4zyJdqX3mP2Wr7Ft7SsgQKjCXHNZVNsMp4CuZaH0/rs:fill:0:0:1/w:1400/h:1400/q:60/mb:500000/aHR0cHM6Ly9pbWct/dXBsb2FkLXByb2R1/Y3Rpb24udHJhbnNp/c3Rvci5mbS82ZGQ1/MWRhZmRhNGQzNjIx/ZGU0YmZhOWNkMjEy/MmI2OC5wbmc.jpg"/>
      <itunes:duration>1658</itunes:duration>
      <itunes:summary>
        <![CDATA[<p>In this episode of Impact AI, we delve into the transformative impact of AI on in-vitro fertilization (IVF) with Daniella Gilboa, co-founder and CEO of AIVF, a startup that develops AI-powered IVF solutions to help increase the certainty of a successful journey to parenthood. Join me as Daniella shares her mission to democratize fertility care and offers insight into AIVF’s proprietary technology that delivers reliable, objective, and data-driven IVF outcomes for clinicians, embryologists, and patients. We explore the role and challenges of machine learning at AIVF, strategies for validating AI models in clinical practice, and the current demand for AI-powered IVF solutions. We also discuss the metrics used to measure the impact of AIVF's technology, Daniella’s advice for other AI-powered startup leaders, and her vision for the future. Tune in to gain valuable insights into the future of fertility care and find out how AI is making IVF more effective and accessible!</p><p><br></p><p><strong>Key Points:</strong></p><ul><li>How Daniella came to understand the epidemiology and data aspects of fertility.</li><li>What AIVF does and why it’s so important for both patients and clinicians.</li><li>The role of machine learning at AIVF and the challenges their models encounter. 
</li><li>AIVF’s strategy for validating their models and translating KPIs into clinical settings.</li><li>The value of explainability to empower embryologists to use AI as a tool.</li><li>Daniella’s definition of computational embryology, assisted by machine learning.</li><li>Why now is the right time for AI-powered IVF solutions.</li><li>Metrics that AIVF uses to measure the impact of their technology.</li><li>Daniella’s advice for leaders of AI-powered startups and her vision for the future.</li></ul><p><br></p><p><strong>Quotes:</strong></p><p>“We showed that if you use AI as a tool for the embryologist – [it] increased the success rates – The decision-making is faster, more accurate. You freeze less embryos because each embryo you freeze is accurate – It changes the way the lab works and it optimizes everything.” — Daniella Gilboa</p><p><br></p><p>“The way you interact with the patient and consult the journey ahead is changing. It’s more accurate. It allows you to make more informed decisions. This is the right way of doing medicine. 
It needs to be data-driven rather than subjective human analysis.” — Daniella Gilboa</p><p><br></p><p>“AIVF needs to become the standard of care.” — Daniella Gilboa</p><p><br></p><p><strong>Links:</strong></p><p><a href="https://aivf.co/">AIVF</a></p><p><a href="https://www.linkedin.com/in/daniella-gilboa-20295713/">Daniella Gilboa on LinkedIn</a></p><p><a href="https://x.com/gilboadaniella">Daniella Gilboa on X</a></p><p><br><strong>Resources for Computer Vision Teams:</strong></p><p><a href="https://www.linkedin.com/in/hdcouture/">LinkedIn</a> – Connect with Heather.</p><p><a href="https://pixelscientia.com/newsletter/">Computer Vision Insights Newsletter</a> – A biweekly newsletter to help bring the latest machine learning and computer vision research to applications in people and planetary health.</p><p><a href="http://pixelscientia.com/services/strategy-session/">Computer Vision Strategy Session</a> – Not sure how to advance your computer vision project? Get unstuck with a clear set of next steps. Schedule a 1 hour strategy session now to advance your project.</p>]]>
      </itunes:summary>
      <itunes:keywords>machine learning, ivf</itunes:keywords>
      <itunes:explicit>No</itunes:explicit>
      <podcast:transcript url="https://share.transistor.fm/s/d7ecf55d/transcript.txt" type="text/plain"/>
    </item>
    <item>
      <title>Vision Intelligence Filters with Kit Merker from Plainsight Technologies</title>
      <itunes:episode>92</itunes:episode>
      <podcast:episode>92</podcast:episode>
      <itunes:title>Vision Intelligence Filters with Kit Merker from Plainsight Technologies</itunes:title>
      <itunes:episodeType>full</itunes:episodeType>
      <guid isPermaLink="false">4f1f3672-3199-4cc8-80cd-1f66cfaf9703</guid>
      <link>https://pixelscientia.com/podcast/vision-intelligence-filters-with-kit-merker-from-plainsight/</link>
      <description>
        <![CDATA[<p>Image-based machine learning is fast becoming an AI staple, and with its new Vision Intelligence Filters, Plainsight Technologies is staking its claim as an industry pioneer. Today, I am joined by Plainsight CEO, Kit Merker, who is here to share all the details behind his company’s latest innovation. Kit begins by explaining what Plainsight does and why this work matters in the AI realm. Then, we learn about the mechanics behind Plainsight’s Vision Intelligence Filters, the company’s ML models and data protocols concerning existing customers, the ins and outs of bringing a product like the Vision Intelligence Filters to life, and how bias manifests in image-trained models. We also discuss the most game-changing applications that Kit has been involved in, and he shares some critical advice for young leaders of AI-powered startups, plus so much more!</p><p><br></p><p><strong>Key Points:</strong></p><ul><li>Kit’s professional background and how he ended up at Plainsight.</li><li>What Plainsight does and why this work matters. </li><li>The mechanics behind Plainsight's Vision Intelligence Filters.</li><li>How the company's ML models and data use relate to its customers.</li><li>Understanding when domain expertise comes into play. </li><li>The process of planning and developing a new filter.</li><li>How bias manifests in image-trained models, and how Kit and his team are mitigating this.  </li><li>The most interesting and game-changing applications that Kit has worked on. </li><li>His advice to other leaders of AI-powered startups.</li><li>Kit’s vision for the future of Plainsight Technologies.</li></ul><p><br></p><p><strong>Quotes:</strong></p><p>“Our goal is to give customers very high accuracy on their models.” — Kit Merker</p><p><br></p><p>“A lot of times, traditional enterprises are looking for a solution or an app. 
The filter is like an app, and so customers can start really small with us, get an app that they trust the data, and then expand from there. They don't have any machine learning expertise required.” — Kit Merker</p><p><br></p><p>“Don't fake your demos!” — Kit Merker</p><p><br></p><p><strong>Links:</strong></p><p><a href="https://www.kitmerker.com">Kit Merker</a></p><p><a href="https://www.linkedin.com/in/kitmerker">Kit Merker on LinkedIn</a></p><p><a href="https://x.com/KitMerker">Kit Merker on X</a>  </p><p><a href="https://plainsight.ai/">Plainsight Technologies</a></p><p><br><strong>Resources for Computer Vision Teams:</strong></p><p><a href="https://www.linkedin.com/in/hdcouture/">LinkedIn</a> – Connect with Heather.</p><p><a href="https://pixelscientia.com/newsletter/">Computer Vision Insights Newsletter</a> – A biweekly newsletter to help bring the latest machine learning and computer vision research to applications in people and planetary health.</p><p><a href="http://pixelscientia.com/services/strategy-session/">Computer Vision Strategy Session</a> – Not sure how to advance your computer vision project? Get unstuck with a clear set of next steps. Schedule a 1 hour strategy session now to advance your project.</p>]]>
      </description>
      <content:encoded>
        <![CDATA[<p>Image-based machine learning is fast becoming an AI staple, and with its new Vision Intelligence Filters, Plainsight Technologies is staking its claim as an industry pioneer. Today, I am joined by Plainsight CEO, Kit Merker, who is here to share all the details behind his company’s latest innovation. Kit begins by explaining what Plainsight does and why this work matters in the AI realm. Then, we learn about the mechanics behind Plainsight’s Vision Intelligence Filters, the company’s ML models and data protocols concerning existing customers, the ins and outs of bringing a product like the Vision Intelligence Filters to life, and how bias manifests in image-trained models. We also discuss the most game-changing applications that Kit has been involved in, and he shares some critical advice for young leaders of AI-powered startups, plus so much more!</p><p><br></p><p><strong>Key Points:</strong></p><ul><li>Kit’s professional background and how he ended up at Plainsight.</li><li>What Plainsight does and why this work matters. </li><li>The mechanics behind Plainsight's Vision Intelligence Filters.</li><li>How the company's ML models and data use relate to its customers.</li><li>Understanding when domain expertise comes into play. </li><li>The process of planning and developing a new filter.</li><li>How bias manifests in image-trained models, and how Kit and his team are mitigating this.  </li><li>The most interesting and game-changing applications that Kit has worked on. </li><li>His advice to other leaders of AI-powered startups.</li><li>Kit’s vision for the future of Plainsight Technologies.</li></ul><p><br></p><p><strong>Quotes:</strong></p><p>“Our goal is to give customers very high accuracy on their models.” — Kit Merker</p><p><br></p><p>“A lot of times, traditional enterprises are looking for a solution or an app. 
The filter is like an app, and so customers can start really small with us, get an app that they trust the data, and then expand from there. They don't have any machine learning expertise required.” — Kit Merker</p><p><br></p><p>“Don't fake your demos!” — Kit Merker</p><p><br></p><p><strong>Links:</strong></p><p><a href="https://www.kitmerker.com">Kit Merker</a></p><p><a href="https://www.linkedin.com/in/kitmerker">Kit Merker on LinkedIn</a></p><p><a href="https://x.com/KitMerker">Kit Merker on X</a>  </p><p><a href="https://plainsight.ai/">Plainsight Technologies</a></p><p><br><strong>Resources for Computer Vision Teams:</strong></p><p><a href="https://www.linkedin.com/in/hdcouture/">LinkedIn</a> – Connect with Heather.</p><p><a href="https://pixelscientia.com/newsletter/">Computer Vision Insights Newsletter</a> – A biweekly newsletter to help bring the latest machine learning and computer vision research to applications in people and planetary health.</p><p><a href="http://pixelscientia.com/services/strategy-session/">Computer Vision Strategy Session</a> – Not sure how to advance your computer vision project? Get unstuck with a clear set of next steps. Schedule a 1 hour strategy session now to advance your project.</p>]]>
      </content:encoded>
      <pubDate>Mon, 22 Jul 2024 06:00:00 -0400</pubDate>
      <author>Heather D. Couture</author>
      <enclosure url="https://2.gum.fm/op3.dev/e/pdcn.co/e/pdst.fm/e/dts.podtrac.com/redirect.mp3/media.transistor.fm/76e9e83f/281425f3.mp3" length="27206783" type="audio/mpeg"/>
      <itunes:author>Heather D. Couture</itunes:author>
      <itunes:image href="https://img.transistor.fm/5VUuq-6k4-WApkmAwQsQ63RkyXrp5w_Jzk3Mhkcv_0o/rs:fill:0:0:1/w:1400/h:1400/q:60/mb:500000/aHR0cHM6Ly9pbWct/dXBsb2FkLXByb2R1/Y3Rpb24udHJhbnNp/c3Rvci5mbS9mMjRh/NDI5YzE1MjNlZmQ3/ZTdjMjZkMjI2NDA2/ZDg0Ny5qcGc.jpg"/>
      <itunes:duration>1693</itunes:duration>
      <itunes:summary>
        <![CDATA[<p>Image-based machine learning is fast becoming an AI staple, and with its new Vision Intelligence Filters, Plainsight Technologies is staking its claim as an industry pioneer. Today, I am joined by Plainsight CEO, Kit Merker, who is here to share all the details behind his company’s latest innovation. Kit begins by explaining what Plainsight does and why this work matters in the AI realm. Then, we learn about the mechanics behind Plainsight’s Vision Intelligence Filters, the company’s ML models and data protocols concerning existing customers, the ins and outs of bringing a product like the Vision Intelligence Filters to life, and how bias manifests in image-trained models. We also discuss the most game-changing applications that Kit has been involved in, and he shares some critical advice for young leaders of AI-powered startups, plus so much more!</p><p><br></p><p><strong>Key Points:</strong></p><ul><li>Kit’s professional background and how he ended up at Plainsight.</li><li>What Plainsight does and why this work matters. </li><li>The mechanics behind Plainsight's Vision Intelligence Filters.</li><li>How the company's ML models and data use relate to its customers.</li><li>Understanding when domain expertise comes into play. </li><li>The process of planning and developing a new filter.</li><li>How bias manifests in image-trained models, and how Kit and his team are mitigating this.  </li><li>The most interesting and game-changing applications that Kit has worked on. </li><li>His advice to other leaders of AI-powered startups.</li><li>Kit’s vision for the future of Plainsight Technologies.</li></ul><p><br></p><p><strong>Quotes:</strong></p><p>“Our goal is to give customers very high accuracy on their models.” — Kit Merker</p><p><br></p><p>“A lot of times, traditional enterprises are looking for a solution or an app. 
The filter is like an app, and so customers can start really small with us, get an app that they trust the data, and then expand from there. They don't have any machine learning expertise required.” — Kit Merker</p><p><br></p><p>“Don't fake your demos!” — Kit Merker</p><p><br></p><p><strong>Links:</strong></p><p><a href="https://www.kitmerker.com">Kit Merker</a></p><p><a href="https://www.linkedin.com/in/kitmerker">Kit Merker on LinkedIn</a></p><p><a href="https://x.com/KitMerker">Kit Merker on X</a>  </p><p><a href="https://plainsight.ai/">Plainsight Technologies</a></p><p><br><strong>Resources for Computer Vision Teams:</strong></p><p><a href="https://www.linkedin.com/in/hdcouture/">LinkedIn</a> – Connect with Heather.</p><p><a href="https://pixelscientia.com/newsletter/">Computer Vision Insights Newsletter</a> – A biweekly newsletter to help bring the latest machine learning and computer vision research to applications in people and planetary health.</p><p><a href="http://pixelscientia.com/services/strategy-session/">Computer Vision Strategy Session</a> – Not sure how to advance your computer vision project? Get unstuck with a clear set of next steps. Schedule a 1 hour strategy session now to advance your project.</p>]]>
      </itunes:summary>
      <itunes:keywords>machine learning, computer vision, ai</itunes:keywords>
      <itunes:explicit>No</itunes:explicit>
      <podcast:transcript url="https://share.transistor.fm/s/76e9e83f/transcript.txt" type="text/plain"/>
    </item>
    <item>
      <title>Interpreting Infant Cries with Charles Onu from Ubenwa Health</title>
      <itunes:episode>91</itunes:episode>
      <podcast:episode>91</podcast:episode>
      <itunes:title>Interpreting Infant Cries with Charles Onu from Ubenwa Health</itunes:title>
      <itunes:episodeType>full</itunes:episodeType>
      <guid isPermaLink="false">36a46e57-699e-4e8d-9ed4-fd5e8c7e6c33</guid>
      <link>https://pixelscientia.com/podcast/interpreting-infant-cries-with-charles-onu-from-ubenwa-health/</link>
      <description>
        <![CDATA[<p>Infants cry when they're hungry, tired, uncomfortable, or upset. They also cry when they’re in pain or severely ill. But how can parents tell the difference? To help us address this critical question, I'm joined by Charles Onu, a health informatics researcher, software engineer, and CEO of Ubenwa. Ubenwa is a groundbreaking app that uses AI to interpret infants' needs and health by analyzing the biomarkers in their cries. Charles conceived of the idea while working in local communities in south-eastern Nigeria, where high rates of newborn mortality due to late detection of Perinatal Asphyxia inspired him to create a solution.</p><p>In this episode, Charles shares insights into Ubenwa's machine-learning models and how they establish an infant's cry as a vital sign. He discusses the process of collecting and annotating data through partnerships with children's hospitals, the challenges of working with audio data, the benefits of creating a foundation model for infant cries, and much more. He also offers human-focused advice for leaders of AI-powered startups and reflects on his vision for success and the impact he hopes to achieve with Ubenwa. 
Tune in to discover how understanding your infant’s cries can transform healthcare and well-being for newborns and their families!</p><p><br></p><p><strong>Key Points:</strong></p><ul><li>Charles' converging interests in math and healthcare, which led him to create Ubenwa.</li><li>What Ubenwa does to establish an infant’s cry as a vital sign (and why it’s so important).</li><li>The essential end-to-end role that machine learning plays in this technology.</li><li>How Ubenwa collects and annotates data by partnering with children’s hospitals.</li><li>Challenges of working with audio data and training medical ML models on it.</li><li>Insight into the benefits of creating a foundation model for infant cries.</li><li>Variations in infant’s cries and how Ubenwa’s models generalize for these shifts.</li><li>Valuable research Ubenwa has made publicly available as a gift to the ML community.</li><li>Charles’ human-focused advice for other leaders of AI-powered startups.</li><li>What success means to Charles and the impact he hopes to make with Ubenwa.</li></ul><p><br></p><p><strong>Quotes:</strong></p><p>“Ubenwa was born out of the idea that, if there's something that [human doctors] can listen to to come to a conclusion [about an infant’s health], then there has to be something machines can also learn from the infant's cry.” — Charles Onu</p><p><br></p><p>“The real leap we made with self-supervised learning is that you now do not need an external annotation to learn. The model can use the data to supervise itself.” — Charles Onu</p><p><br></p><p>“AI-powered or not, – the problem of a startup remains the same. It’s to meet a need that humans have. – At the end of the day, AI is not just there for AI only. It’s only going to be a successful and useful startup if you identify a need and [solve] that problem.” — Charles Onu</p><p><br></p><p>“Human babies have evolved to communicate their needs and their health through their cries. We [haven’t] had the tools to understand that. 
Babies have been trying to talk to us for a long time. It's time to listen.” — Charles Onu</p><p><br></p><p><strong>Links:</strong></p><p><a href="https://www.ubenwa.ai/">Ubenwa Health</a></p><p><a href="https://ubenwa.ai/nanni.html">Nanni AI</a></p><p><a href="https://www.linkedin.com/in/onucharles/">Charles Onu on LinkedIn</a></p><p><a href="https://x.com/onucharlesc">Charles Onu on X</a></p><p><a href="https://onucharles.github.io/">Charles Onu on GitHub</a></p><p><a href="https://github.com/Ubenwa">Ubenwa on GitHub</a></p><p><a href="https://github.com/Ubenwa/cryceleb2023">Ubenwa CryCeleb Database</a></p><p><br><strong>Resources for Computer Vision Teams:</strong></p><p><a href="https://www.linkedin.com/in/hdcouture/">LinkedIn</a> – Connect with Heather.</p><p><a href="https://pixelscientia.com/newsletter/">Computer Vision Insights Newsletter</a> – A biweekly newsletter to help bring the latest machine learning and computer vision research to applications in people and planetary health.</p><p><a href="http://pixelscientia.com/services/strategy-session/">Computer Vision Strategy Session</a> – Not sure how to advance your computer vision project? Get unstuck with a clear set of next steps. Schedule a 1 hour strategy session now to advance your project.</p>]]>
      </description>
      <content:encoded>
        <![CDATA[<p>Infants cry when they're hungry, tired, uncomfortable, or upset. They also cry when they’re in pain or severely ill. But how can parents tell the difference? To help us address this critical question, I'm joined by Charles Onu, a health informatics researcher, software engineer, and CEO of Ubenwa. Ubenwa is a groundbreaking app that uses AI to interpret infants' needs and health by analyzing the biomarkers in their cries. Charles conceived of the idea while working in local communities in south-eastern Nigeria, where high rates of newborn mortality due to late detection of Perinatal Asphyxia inspired him to create a solution.</p><p>In this episode, Charles shares insights into Ubenwa's machine-learning models and how they establish an infant's cry as a vital sign. He discusses the process of collecting and annotating data through partnerships with children's hospitals, the challenges of working with audio data, the benefits of creating a foundation model for infant cries, and much more. He also offers human-focused advice for leaders of AI-powered startups and reflects on his vision for success and the impact he hopes to achieve with Ubenwa. 
Tune in to discover how understanding your infant’s cries can transform healthcare and well-being for newborns and their families!</p><p><br></p><p><strong>Key Points:</strong></p><ul><li>Charles' converging interests in math and healthcare, which led him to create Ubenwa.</li><li>What Ubenwa does to establish an infant’s cry as a vital sign (and why it’s so important).</li><li>The essential end-to-end role that machine learning plays in this technology.</li><li>How Ubenwa collects and annotates data by partnering with children’s hospitals.</li><li>Challenges of working with audio data and training medical ML models on it.</li><li>Insight into the benefits of creating a foundation model for infant cries.</li><li>Variations in infant’s cries and how Ubenwa’s models generalize for these shifts.</li><li>Valuable research Ubenwa has made publicly available as a gift to the ML community.</li><li>Charles’ human-focused advice for other leaders of AI-powered startups.</li><li>What success means to Charles and the impact he hopes to make with Ubenwa.</li></ul><p><br></p><p><strong>Quotes:</strong></p><p>“Ubenwa was born out of the idea that, if there's something that [human doctors] can listen to to come to a conclusion [about an infant’s health], then there has to be something machines can also learn from the infant's cry.” — Charles Onu</p><p><br></p><p>“The real leap we made with self-supervised learning is that you now do not need an external annotation to learn. The model can use the data to supervise itself.” — Charles Onu</p><p><br></p><p>“AI-powered or not, – the problem of a startup remains the same. It’s to meet a need that humans have. – At the end of the day, AI is not just there for AI only. It’s only going to be a successful and useful startup if you identify a need and [solve] that problem.” — Charles Onu</p><p><br></p><p>“Human babies have evolved to communicate their needs and their health through their cries. We [haven’t] had the tools to understand that. 
Babies have been trying to talk to us for a long time. It's time to listen.” — Charles Onu</p><p><br></p><p><strong>Links:</strong></p><p><a href="https://www.ubenwa.ai/">Ubenwa Health</a></p><p><a href="https://ubenwa.ai/nanni.html">Nanni AI</a></p><p><a href="https://www.linkedin.com/in/onucharles/">Charles Onu on LinkedIn</a></p><p><a href="https://x.com/onucharlesc">Charles Onu on X</a></p><p><a href="https://onucharles.github.io/">Charles Onu on GitHub</a></p><p><a href="https://github.com/Ubenwa">Ubenwa on GitHub</a></p><p><a href="https://github.com/Ubenwa/cryceleb2023">Ubenwa CryCeleb Database</a></p><p><br><strong>Resources for Computer Vision Teams:</strong></p><p><a href="https://www.linkedin.com/in/hdcouture/">LinkedIn</a> – Connect with Heather.</p><p><a href="https://pixelscientia.com/newsletter/">Computer Vision Insights Newsletter</a> – A biweekly newsletter to help bring the latest machine learning and computer vision research to applications in people and planetary health.</p><p><a href="http://pixelscientia.com/services/strategy-session/">Computer Vision Strategy Session</a> – Not sure how to advance your computer vision project? Get unstuck with a clear set of next steps. Schedule a 1 hour strategy session now to advance your project.</p>]]>
      </content:encoded>
      <pubDate>Mon, 15 Jul 2024 06:00:00 -0400</pubDate>
      <author>Heather D. Couture</author>
      <enclosure url="https://2.gum.fm/op3.dev/e/pdcn.co/e/pdst.fm/e/dts.podtrac.com/redirect.mp3/media.transistor.fm/2d5e4672/6d4ab095.mp3" length="22057847" type="audio/mpeg"/>
      <itunes:author>Heather D. Couture</itunes:author>
      <itunes:image href="https://img.transistor.fm/5v50RQYkAkYzmBrc6OEXn-Qp55AkhsvHT6vwL97zOYE/rs:fill:0:0:1/w:1400/h:1400/q:60/mb:500000/aHR0cHM6Ly9pbWct/dXBsb2FkLXByb2R1/Y3Rpb24udHJhbnNp/c3Rvci5mbS9mNTIw/MGJhOTA3NTFjYzk0/YTE3NzBjODI3ZDI5/NjliNy5qcGVn.jpg"/>
      <itunes:duration>1373</itunes:duration>
      <itunes:summary>
        <![CDATA[<p>Infants cry when they're hungry, tired, uncomfortable, or upset. They also cry when they’re in pain or severely ill. But how can parents tell the difference? To help us address this critical question, I'm joined by Charles Onu, a health informatics researcher, software engineer, and CEO of Ubenwa. Ubenwa is a groundbreaking app that uses AI to interpret infants' needs and health by analyzing the biomarkers in their cries. Charles conceived of the idea while working in local communities in south-eastern Nigeria, where high rates of newborn mortality due to late detection of Perinatal Asphyxia inspired him to create a solution.</p><p>In this episode, Charles shares insights into Ubenwa's machine-learning models and how they establish an infant's cry as a vital sign. He discusses the process of collecting and annotating data through partnerships with children's hospitals, the challenges of working with audio data, the benefits of creating a foundation model for infant cries, and much more. He also offers human-focused advice for leaders of AI-powered startups and reflects on his vision for success and the impact he hopes to achieve with Ubenwa. 
Tune in to discover how understanding your infant’s cries can transform healthcare and well-being for newborns and their families!</p><p><br></p><p><strong>Key Points:</strong></p><ul><li>Charles' converging interests in math and healthcare, which led him to create Ubenwa.</li><li>What Ubenwa does to establish an infant’s cry as a vital sign (and why it’s so important).</li><li>The essential end-to-end role that machine learning plays in this technology.</li><li>How Ubenwa collects and annotates data by partnering with children’s hospitals.</li><li>Challenges of working with audio data and training medical ML models on it.</li><li>Insight into the benefits of creating a foundation model for infant cries.</li><li>Variations in infant’s cries and how Ubenwa’s models generalize for these shifts.</li><li>Valuable research Ubenwa has made publicly available as a gift to the ML community.</li><li>Charles’ human-focused advice for other leaders of AI-powered startups.</li><li>What success means to Charles and the impact he hopes to make with Ubenwa.</li></ul><p><br></p><p><strong>Quotes:</strong></p><p>“Ubenwa was born out of the idea that, if there's something that [human doctors] can listen to to come to a conclusion [about an infant’s health], then there has to be something machines can also learn from the infant's cry.” — Charles Onu</p><p><br></p><p>“The real leap we made with self-supervised learning is that you now do not need an external annotation to learn. The model can use the data to supervise itself.” — Charles Onu</p><p><br></p><p>“AI-powered or not, – the problem of a startup remains the same. It’s to meet a need that humans have. – At the end of the day, AI is not just there for AI only. It’s only going to be a successful and useful startup if you identify a need and [solve] that problem.” — Charles Onu</p><p><br></p><p>“Human babies have evolved to communicate their needs and their health through their cries. We [haven’t] had the tools to understand that. 
Babies have been trying to talk to us for a long time. It's time to listen.” — Charles Onu</p><p><br></p><p><strong>Links:</strong></p><p><a href="https://www.ubenwa.ai/">Ubenwa Health</a></p><p><a href="https://ubenwa.ai/nanni.html">Nanni AI</a></p><p><a href="https://www.linkedin.com/in/onucharles/">Charles Onu on LinkedIn</a></p><p><a href="https://x.com/onucharlesc">Charles Onu on X</a></p><p><a href="https://onucharles.github.io/">Charles Onu on GitHub</a></p><p><a href="https://github.com/Ubenwa">Ubenwa on GitHub</a></p><p><a href="https://github.com/Ubenwa/cryceleb2023">Ubenwa CryCeleb Database</a></p><p><br><strong>Resources for Computer Vision Teams:</strong></p><p><a href="https://www.linkedin.com/in/hdcouture/">LinkedIn</a> – Connect with Heather.</p><p><a href="https://pixelscientia.com/newsletter/">Computer Vision Insights Newsletter</a> – A biweekly newsletter to help bring the latest machine learning and computer vision research to applications in people and planetary health.</p><p><a href="http://pixelscientia.com/services/strategy-session/">Computer Vision Strategy Session</a> – Not sure how to advance your computer vision project? Get unstuck with a clear set of next steps. Schedule a 1 hour strategy session now to advance your project.</p>]]>
      </itunes:summary>
      <itunes:keywords>machine learning, healthcare</itunes:keywords>
      <itunes:explicit>No</itunes:explicit>
      <podcast:transcript url="https://share.transistor.fm/s/2d5e4672/transcript.txt" type="text/plain"/>
    </item>
    <item>
      <title>Remote Monitoring and Water Forecasting with Marshall Moutenot from Upstream Tech</title>
      <itunes:episode>90</itunes:episode>
      <podcast:episode>90</podcast:episode>
      <itunes:title>Remote Monitoring and Water Forecasting with Marshall Moutenot from Upstream Tech</itunes:title>
      <itunes:episodeType>full</itunes:episodeType>
      <guid isPermaLink="false">7766f2e7-4d3b-4612-8280-13f4c6ce8ae5</guid>
      <link>https://pixelscientia.com/podcast/remote-monitoring-and-water-forecasting-with-marshall-moutenot-from-upstream-tech/</link>
      <description>
        <![CDATA[<p>Innovative AI technologies are paving the way for more efficient and impactful environmental monitoring. Joining me today to discuss remote monitoring and water forecasting is Marshall Moutenot, the co-founder and CEO of Upstream Tech. From using satellite imagery to monitor conservation projects to employing machine learning for accurate water flow predictions, Upstream Tech is at the forefront of leveraging technology to address environmental challenges.</p><p>In our conversation, Marshall shares his journey from a tech-savvy childhood to co-founding a company with a mission to make environmental monitoring scalable and cost-effective. He delves into the development of Upstream Tech's two primary products: Lens, for remote monitoring of climate solutions, and HydroForecast, which uses AI to predict water flow, aiding in hydropower management. Marshall also underscores the need for integrating domain knowledge with machine learning to create reliable models before offering practical insights for AI startups. 
Tune in to learn more about how AI can revolutionize environmental conservation!</p><p><br></p><p><strong>Key Points:</strong></p><ul><li>The details of Marshall’s tech-savvy childhood and entrepreneurial journey.</li><li>An overview of Upstream Tech’s mission to improve environmental monitoring.</li><li>How they use AI and satellite imagery for scalable, cost-effective monitoring.</li><li>The development of their Lens product for remote monitoring of climate solutions.</li><li>Why remote monitoring is so challenging at scale and their approach to solving it.</li><li>Their product, HydroForecast, and its role in predicting water flow using machine learning.</li><li>How integrating new inputs like satellite imagery creates reliable, adaptable models.</li><li>Success stories, including outperforming traditional models in a major competition.</li><li>Challenges Upstream Tech faces in acquiring and integrating geospatial data.</li><li>Best practices for ensuring model reliability and effectiveness over time.</li><li>Their team's approach to developing a new machine learning product or feature.</li><li>Marshall’s advice for AI startups: don’t get too attached to the tools!</li><li>His vision for Upstream Tech’s impact on environmental conservation.</li></ul><p><br></p><p><strong>Quotes:</strong><br>"What these new machine learning models that we're employing allow us to do is to provide enough data to the model to create [equations] to describe physical interactions." — Marshall Moutenot</p><p><br></p><p>“[The] adaptability of these models is something that is really exciting for the field overall." — Marshall Moutenot</p><p><br>"We train a single model on a wide diversity, which forces the model to learn the common rules across all of them.” — Marshall Moutenot</p><p><br></p><p>“As an organization, one of [Upstream Tech’s] purposes is to see the 100% renewable grid become a reality. 
We want to continue to contribute to that and to build forecasts that enable that future.” — Marshall Moutenot</p><p><br></p><p><strong>Links:</strong></p><p><a href="https://www.linkedin.com/in/marshallmoutenot/">Marshall Moutenot on LinkedIn</a></p><p><a href="https://mrshll.com/">Marshall’s Blog</a></p><p><a href="https://www.upstream.tech/">Upstream Tech</a></p><p><a href="https://www.linkedin.com/company/upstream-pbc/">Upstream Tech on LinkedIn</a></p><p><a href="https://x.com/upstream_tech">Upstream Tech on X</a></p><p><a href="https://www.youtube.com/@upstream_tech/">Upstream Tech on YouTube</a></p><p><br><strong>Resources for Computer Vision Teams:</strong></p><p><a href="https://www.linkedin.com/in/hdcouture/">LinkedIn</a> – Connect with Heather.</p><p><a href="https://pixelscientia.com/newsletter/">Computer Vision Insights Newsletter</a> – A biweekly newsletter to help bring the latest machine learning and computer vision research to applications in people and planetary health.</p><p><a href="http://pixelscientia.com/services/strategy-session/">Computer Vision Strategy Session</a> – Not sure how to advance your computer vision project? Get unstuck with a clear set of next steps. Schedule a 1 hour strategy session now to advance your project.</p>]]>
      </description>
      <content:encoded>
        <![CDATA[<p>Innovative AI technologies are paving the way for more efficient and impactful environmental monitoring. Joining me today to discuss remote monitoring and water forecasting is Marshall Moutenot, the co-founder and CEO of Upstream Tech. From using satellite imagery to monitor conservation projects to employing machine learning for accurate water flow predictions, Upstream Tech is at the forefront of leveraging technology to address environmental challenges.</p><p>In our conversation, Marshall shares his journey from a tech-savvy childhood to co-founding a company with a mission to make environmental monitoring scalable and cost-effective. He delves into the development of Upstream Tech's two primary products: Lens, for remote monitoring of climate solutions, and HydroForecast, which uses AI to predict water flow, aiding in hydropower management. Marshall also underscores the need for integrating domain knowledge with machine learning to create reliable models before offering practical insights for AI startups. 
Tune in to learn more about how AI can revolutionize environmental conservation!</p><p><br></p><p><strong>Key Points:</strong></p><ul><li>The details of Marshall’s tech-savvy childhood and entrepreneurial journey.</li><li>An overview of Upstream Tech’s mission to improve environmental monitoring.</li><li>How they use AI and satellite imagery for scalable, cost-effective monitoring.</li><li>The development of their Lens product for remote monitoring of climate solutions.</li><li>Why remote monitoring is so challenging at scale and their approach to solving it.</li><li>Their product, HydroForecast, and its role in predicting water flow using machine learning.</li><li>How integrating new inputs like satellite imagery creates reliable, adaptable models.</li><li>Success stories, including outperforming traditional models in a major competition.</li><li>Challenges Upstream Tech faces in acquiring and integrating geospatial data.</li><li>Best practices for ensuring model reliability and effectiveness over time.</li><li>Their team's approach to developing a new machine learning product or feature.</li><li>Marshall’s advice for AI startups: don’t get too attached to the tools!</li><li>His vision for Upstream Tech’s impact on environmental conservation.</li></ul><p><br></p><p><strong>Quotes:</strong><br>"What these new machine learning models that we're employing allow us to do is to provide enough data to the model to create [equations] to describe physical interactions." — Marshall Moutenot</p><p><br></p><p>“[The] adaptability of these models is something that is really exciting for the field overall." — Marshall Moutenot</p><p><br>"We train a single model on a wide diversity, which forces the model to learn the common rules across all of them.” — Marshall Moutenot<strong></strong></p><p>“As an organization, one of [Upstream Tech’s] purposes is to see the 100% renewable grid become a reality. 
We want to continue to contribute to that and to build forecasts that enable that future.” — Marshall Moutenot</p><p><br></p><p><strong>Links:</strong></p><p><a href="https://www.linkedin.com/in/marshallmoutenot/">Marshall Moutenot on LinkedIn</a></p><p><a href="https://mrshll.com/">Marshall’s Blog</a></p><p><a href="https://www.upstream.tech/">Upstream Tech</a></p><p><a href="https://www.linkedin.com/company/upstream-pbc/">Upstream Tech on LinkedIn</a></p><p><a href="https://x.com/upstream_tech">Upstream Tech on X</a></p><p><a href="https://www.youtube.com/@upstream_tech/">Upstream Tech on YouTube</a></p><p><br><strong>Resources for Computer Vision Teams:</strong></p><p><a href="https://www.linkedin.com/in/hdcouture/">LinkedIn</a> – Connect with Heather.</p><p><a href="https://pixelscientia.com/newsletter/">Computer Vision Insights Newsletter</a> – A biweekly newsletter to help bring the latest machine learning and computer vision research to applications in people and planetary health.</p><p><a href="http://pixelscientia.com/services/strategy-session/">Computer Vision Strategy Session</a> – Not sure how to advance your computer vision project? Get unstuck with a clear set of next steps. Schedule a 1 hour strategy session now to advance your project.</p>]]>
      </content:encoded>
      <pubDate>Mon, 08 Jul 2024 06:00:00 -0400</pubDate>
      <author>Heather D. Couture</author>
      <enclosure url="https://2.gum.fm/op3.dev/e/pdcn.co/e/pdst.fm/e/dts.podtrac.com/redirect.mp3/media.transistor.fm/33a655e3/79b67e87.mp3" length="25659518" type="audio/mpeg"/>
      <itunes:author>Heather D. Couture</itunes:author>
      <itunes:image href="https://img.transistor.fm/OrGoURFO5Owz3L6tsn2wOn1LJ9_1O6QQ32OVgmMCG_8/rs:fill:0:0:1/w:1400/h:1400/q:60/mb:500000/aHR0cHM6Ly9pbWct/dXBsb2FkLXByb2R1/Y3Rpb24udHJhbnNp/c3Rvci5mbS8wODZm/ZmZkNjJlOGY0YjZj/MDEzMjQyOTliZDA3/ZmE2YS5qcGVn.jpg"/>
      <itunes:duration>1595</itunes:duration>
      <itunes:summary>
        <![CDATA[<p>Innovative AI technologies are paving the way for more efficient and impactful environmental monitoring. Joining me today to discuss remote monitoring and water forecasting is Marshall Moutenot, the co-founder and CEO of Upstream Tech. From using satellite imagery to monitor conservation projects to employing machine learning for accurate water flow predictions, Upstream Tech is at the forefront of leveraging technology to address environmental challenges.</p><p>In our conversation, Marshall shares his journey from a tech-savvy childhood to co-founding a company with a mission to make environmental monitoring scalable and cost-effective. He delves into the development of Upstream Tech's two primary products: Lens, for remote monitoring of climate solutions, and HydroForecast, which uses AI to predict water flow, aiding in hydropower management. Marshall also underscores the need for integrating domain knowledge with machine learning to create reliable models before offering practical insights for AI startups. 
Tune in to learn more about how AI can revolutionize environmental conservation!</p><p><br></p><p><strong>Key Points:</strong></p><ul><li>The details of Marshall’s tech-savvy childhood and entrepreneurial journey.</li><li>An overview of Upstream Tech’s mission to improve environmental monitoring.</li><li>How they use AI and satellite imagery for scalable, cost-effective monitoring.</li><li>The development of their Lens product for remote monitoring of climate solutions.</li><li>Why remote monitoring is so challenging at scale and their approach to solving it.</li><li>Their product, HydroForecast, and its role in predicting water flow using machine learning.</li><li>How integrating new inputs like satellite imagery creates reliable, adaptable models.</li><li>Success stories, including outperforming traditional models in a major competition.</li><li>Challenges Upstream Tech faces in acquiring and integrating geospatial data.</li><li>Best practices for ensuring model reliability and effectiveness over time.</li><li>Their team's approach to developing a new machine learning product or feature.</li><li>Marshall’s advice for AI startups: don’t get too attached to the tools!</li><li>His vision for Upstream Tech’s impact on environmental conservation.</li></ul><p><br></p><p><strong>Quotes:</strong><br>"What these new machine learning models that we're employing allow us to do is to provide enough data to the model to create [equations] to describe physical interactions." — Marshall Moutenot</p><p><br></p><p>“[The] adaptability of these models is something that is really exciting for the field overall." — Marshall Moutenot</p><p><br>"We train a single model on a wide diversity, which forces the model to learn the common rules across all of them.” — Marshall Moutenot<strong></strong></p><p>“As an organization, one of [Upstream Tech’s] purposes is to see the 100% renewable grid become a reality. 
We want to continue to contribute to that and to build forecasts that enable that future.” — Marshall Moutenot</p><p><br></p><p><strong>Links:</strong></p><p><a href="https://www.linkedin.com/in/marshallmoutenot/">Marshall Moutenot on LinkedIn</a></p><p><a href="https://mrshll.com/">Marshall’s Blog</a></p><p><a href="https://www.upstream.tech/">Upstream Tech</a></p><p><a href="https://www.linkedin.com/company/upstream-pbc/">Upstream Tech on LinkedIn</a></p><p><a href="https://x.com/upstream_tech">Upstream Tech on X</a></p><p><a href="https://www.youtube.com/@upstream_tech/">Upstream Tech on YouTube</a></p><p><br><strong>Resources for Computer Vision Teams:</strong></p><p><a href="https://www.linkedin.com/in/hdcouture/">LinkedIn</a> – Connect with Heather.</p><p><a href="https://pixelscientia.com/newsletter/">Computer Vision Insights Newsletter</a> – A biweekly newsletter to help bring the latest machine learning and computer vision research to applications in people and planetary health.</p><p><a href="http://pixelscientia.com/services/strategy-session/">Computer Vision Strategy Session</a> – Not sure how to advance your computer vision project? Get unstuck with a clear set of next steps. Schedule a 1 hour strategy session now to advance your project.</p>]]>
      </itunes:summary>
      <itunes:keywords>machine learning, water forecasting, remote monitoring</itunes:keywords>
      <itunes:explicit>No</itunes:explicit>
      <podcast:transcript url="https://share.transistor.fm/s/33a655e3/transcript.txt" type="text/plain"/>
    </item>
    <item>
      <title>Scaling Healthcare Through Virtual Primary Care with Anitha Kannan from Curai</title>
      <itunes:episode>89</itunes:episode>
      <podcast:episode>89</podcast:episode>
      <itunes:title>Scaling Healthcare Through Virtual Primary Care with Anitha Kannan from Curai</itunes:title>
      <itunes:episodeType>full</itunes:episodeType>
      <guid isPermaLink="false">03ff8639-3de7-4014-9491-954d27e8adff</guid>
      <link>https://pixelscientia.com/podcast/scaling-healthcare-through-virtual-primary-care-with-anitha-kannan-from-curai/</link>
      <description>
        <![CDATA[<p>What will it take to bring affordable, accessible, and timely healthcare to all? Curai, an AI-powered virtual clinic, is on a mission to do just that by leveraging AI to enhance the efficiency of licensed physicians through text-based virtual primary care. In today’s episode, I sit down with Anitha Kannan, head of AI and founding member of Curai, to talk about the transformative potential of virtual primary care and its role in scaling healthcare access.</p><p>In our conversation, Anitha delves into the technical aspects of using large language models for patient data processing, the challenges of training models with clinical data, and the strategies Curai employs to ensure high-quality care. We also discuss the innovative ways Curai integrates AI into healthcare, the significance of multidisciplinary teams, and Anitha’s vision for the future of virtual care. Tune in for an insightful conversation on scaling healthcare through virtual primary care and learn how Curai is making a real impact!</p><p><strong>Key Points:</strong></p><ul><li>Some background on Anitha Kannan, and how she joined Curai.</li><li>An overview of Curai’s services as a virtual healthcare practice.</li><li>How they provide affordable and timely healthcare access through AI-enhanced systems.</li><li>Machine learning’s role in history taking, information gathering, and summarization.</li><li>How AI streamlines the workflow for physicians.</li><li>Their use of large language models to process patient data.</li><li>Training model challenges: ensuring clinical correctness and handling data omission issues.</li><li>Best practices they’ve developed for validating models and the importance of evaluation.</li><li>Fundamental differences between their work and how other LLMs, like ChatGPT, are trained.</li><li>Their strategy for balancing long-term research aspirations with short-term product development.</li><li>An overview of their multidisciplinary teams and how this contributes to 
their success.</li><li>Anitha’s hopes for the future of Curai; particularly through partnerships with healthcare organizations.</li></ul><p><br></p><p><strong>Quotes:</strong></p><p>"Our mission is to provide the best health care to everyone." — Anitha Kannan</p><p><br></p><p>“Today, [Curai runs] a text-based virtual primary care practice. We have our licensed physicians or experts in their fields. Then we supercharge them and bring about a lot of efficiencies by leveraging AI.” — Anitha Kannan</p><p><br></p><p>"It's very easy to build 80% of a good product with AI today, but I think to get it to 100%, [and] to get it to scale, to be useful in [the] real world — evaluation is the number one thing." — Anitha Kannan</p><p><br></p><p>“At Curai, the AI team is composed of clinical experts, subject matter experts, researchers, and machine learning engineers. Every project, long-term or short-term, has a mix of these types of expertise in it. This allows us to work through the problem much more effectively.” — Anitha Kannan</p><p><br></p><p><strong>Links:</strong></p><p><a href="https://www.linkedin.com/in/anitha-kannan-ak/">Anitha Kannan on LinkedIn</a> </p><p><a href="https://x.com/anithakan?lang=en">Anitha Kannan on X</a></p><p><a href="https://www.curaihealth.com/">Curai Health</a></p><p><br><strong>Resources for Computer Vision Teams:</strong></p><p><a href="https://www.linkedin.com/in/hdcouture/">LinkedIn</a> – Connect with Heather.</p><p><a href="https://pixelscientia.com/newsletter/">Computer Vision Insights Newsletter</a> – A biweekly newsletter to help bring the latest machine learning and computer vision research to applications in people and planetary health.</p><p><a href="http://pixelscientia.com/services/strategy-session/">Computer Vision Strategy Session</a> – Not sure how to advance your computer vision project? Get unstuck with a clear set of next steps. Schedule a 1 hour strategy session now to advance your project.</p>]]>
      </description>
      <content:encoded>
        <![CDATA[<p>What will it take to bring affordable, accessible, and timely healthcare to all? Curai, an AI-powered virtual clinic, is on a mission to do just that by leveraging AI to enhance the efficiency of licensed physicians through text-based virtual primary care. In today’s episode, I sit down with Anitha Kannan, head of AI and founding member of Curai, to talk about the transformative potential of virtual primary care and its role in scaling healthcare access.</p><p>In our conversation, Anitha delves into the technical aspects of using large language models for patient data processing, the challenges of training models with clinical data, and the strategies Curai employs to ensure high-quality care. We also discuss the innovative ways Curai integrates AI into healthcare, the significance of multidisciplinary teams, and Anitha’s vision for the future of virtual care. Tune in for an insightful conversation on scaling healthcare through virtual primary care and learn how Curai is making a real impact!</p><p><strong>Key Points:</strong></p><ul><li>Some background on Anitha Kannan, and how she joined Curai.</li><li>An overview of Curai’s services as a virtual healthcare practice.</li><li>How they provide affordable and timely healthcare access through AI-enhanced systems.</li><li>Machine learning’s role in history taking, information gathering, and summarization.</li><li>How AI streamlines the workflow for physicians.</li><li>Their use of large language models to process patient data.</li><li>Training model challenges: ensuring clinical correctness and handling data omission issues.</li><li>Best practices they’ve developed for validating models and the importance of evaluation.</li><li>Fundamental differences between their work and how other LLMs, like ChatGPT, are trained.</li><li>Their strategy for balancing long-term research aspirations with short-term product development.</li><li>An overview of their multidisciplinary teams and how this contributes to 
their success.</li><li>Anitha’s hopes for the future of Curai; particularly through partnerships with healthcare organizations.</li></ul><p><br></p><p><strong>Quotes:</strong></p><p>"Our mission is to provide the best health care to everyone." — Anitha Kannan</p><p><br></p><p>“Today, [Curai runs] a text-based virtual primary care practice. We have our licensed physicians or experts in their fields. Then we supercharge them and bring about a lot of efficiencies by leveraging AI.” — Anitha Kannan</p><p><br></p><p>"It's very easy to build 80% of a good product with AI today, but I think to get it to 100%, [and] to get it to scale, to be useful in [the] real world — evaluation is the number one thing." — Anitha Kannan</p><p><br></p><p>“At Curai, the AI team is composed of clinical experts, subject matter experts, researchers, and machine learning engineers. Every project, long-term or short-term, has a mix of these types of expertise in it. This allows us to work through the problem much more effectively.” — Anitha Kannan</p><p><br></p><p><strong>Links:</strong></p><p><a href="https://www.linkedin.com/in/anitha-kannan-ak/">Anitha Kannan on LinkedIn</a> </p><p><a href="https://x.com/anithakan?lang=en">Anitha Kannan on X</a></p><p><a href="https://www.curaihealth.com/">Curai Health</a></p><p><br><strong>Resources for Computer Vision Teams:</strong></p><p><a href="https://www.linkedin.com/in/hdcouture/">LinkedIn</a> – Connect with Heather.</p><p><a href="https://pixelscientia.com/newsletter/">Computer Vision Insights Newsletter</a> – A biweekly newsletter to help bring the latest machine learning and computer vision research to applications in people and planetary health.</p><p><a href="http://pixelscientia.com/services/strategy-session/">Computer Vision Strategy Session</a> – Not sure how to advance your computer vision project? Get unstuck with a clear set of next steps. Schedule a 1 hour strategy session now to advance your project.</p>]]>
      </content:encoded>
      <pubDate>Mon, 01 Jul 2024 06:00:00 -0400</pubDate>
      <author>Heather D. Couture</author>
      <enclosure url="https://2.gum.fm/op3.dev/e/pdcn.co/e/pdst.fm/e/dts.podtrac.com/redirect.mp3/media.transistor.fm/1a9ec3dc/71b1d72f.mp3" length="21842396" type="audio/mpeg"/>
      <itunes:author>Heather D. Couture</itunes:author>
      <itunes:image href="https://img.transistor.fm/e8x3Vuvfg4WKOqR_m3qVVvCYEkEAOaIq0Gfp3zuRI5Y/rs:fill:0:0:1/w:1400/h:1400/q:60/mb:500000/aHR0cHM6Ly9pbWct/dXBsb2FkLXByb2R1/Y3Rpb24udHJhbnNp/c3Rvci5mbS83ZjVi/ZjY5OGQ4MTYxZmJk/NmNiOTQ0NGViMTQ5/Mzg4Yi5qcGVn.jpg"/>
      <itunes:duration>1360</itunes:duration>
      <itunes:summary>
        <![CDATA[<p>What will it take to bring affordable, accessible, and timely healthcare to all? Curai, an AI-powered virtual clinic, is on a mission to do just that by leveraging AI to enhance the efficiency of licensed physicians through text-based virtual primary care. In today’s episode, I sit down with Anitha Kannan, head of AI and founding member of Curai, to talk about the transformative potential of virtual primary care and its role in scaling healthcare access.</p><p>In our conversation, Anitha delves into the technical aspects of using large language models for patient data processing, the challenges of training models with clinical data, and the strategies Curai employs to ensure high-quality care. We also discuss the innovative ways Curai integrates AI into healthcare, the significance of multidisciplinary teams, and Anitha’s vision for the future of virtual care. Tune in for an insightful conversation on scaling healthcare through virtual primary care and learn how Curai is making a real impact!</p><p><strong>Key Points:</strong></p><ul><li>Some background on Anitha Kannan, and how she joined Curai.</li><li>An overview of Curai’s services as a virtual healthcare practice.</li><li>How they provide affordable and timely healthcare access through AI-enhanced systems.</li><li>Machine learning’s role in history taking, information gathering, and summarization.</li><li>How AI streamlines the workflow for physicians.</li><li>Their use of large language models to process patient data.</li><li>Training model challenges: ensuring clinical correctness and handling data omission issues.</li><li>Best practices they’ve developed for validating models and the importance of evaluation.</li><li>Fundamental differences between their work and how other LLMs, like ChatGPT, are trained.</li><li>Their strategy for balancing long-term research aspirations with short-term product development.</li><li>An overview of their multidisciplinary teams and how this contributes to 
their success.</li><li>Anitha’s hopes for the future of Curai; particularly through partnerships with healthcare organizations.</li></ul><p><br></p><p><strong>Quotes:</strong></p><p>"Our mission is to provide the best health care to everyone." — Anitha Kannan</p><p><br></p><p>“Today, [Curai runs] a text-based virtual primary care practice. We have our licensed physicians or experts in their fields. Then we supercharge them and bring about a lot of efficiencies by leveraging AI.” — Anitha Kannan</p><p><br></p><p>"It's very easy to build 80% of a good product with AI today, but I think to get it to 100%, [and] to get it to scale, to be useful in [the] real world — evaluation is the number one thing." — Anitha Kannan</p><p><br></p><p>“At Curai, the AI team is composed of clinical experts, subject matter experts, researchers, and machine learning engineers. Every project, long-term or short-term, has a mix of these types of expertise in it. This allows us to work through the problem much more effectively.” — Anitha Kannan</p><p><br></p><p><strong>Links:</strong></p><p><a href="https://www.linkedin.com/in/anitha-kannan-ak/">Anitha Kannan on LinkedIn</a> </p><p><a href="https://x.com/anithakan?lang=en">Anitha Kannan on X</a></p><p><a href="https://www.curaihealth.com/">Curai Health</a></p><p><br><strong>Resources for Computer Vision Teams:</strong></p><p><a href="https://www.linkedin.com/in/hdcouture/">LinkedIn</a> – Connect with Heather.</p><p><a href="https://pixelscientia.com/newsletter/">Computer Vision Insights Newsletter</a> – A biweekly newsletter to help bring the latest machine learning and computer vision research to applications in people and planetary health.</p><p><a href="http://pixelscientia.com/services/strategy-session/">Computer Vision Strategy Session</a> – Not sure how to advance your computer vision project? Get unstuck with a clear set of next steps. Schedule a 1 hour strategy session now to advance your project.</p>]]>
      </itunes:summary>
      <itunes:keywords>ai, machine learning, healthcare</itunes:keywords>
      <itunes:explicit>No</itunes:explicit>
      <podcast:transcript url="https://share.transistor.fm/s/1a9ec3dc/transcript.txt" type="text/plain"/>
    </item>
    <item>
      <title>Better EV Batteries with Jason Koeller from Chemix</title>
      <itunes:episode>88</itunes:episode>
      <podcast:episode>88</podcast:episode>
      <itunes:title>Better EV Batteries with Jason Koeller from Chemix</itunes:title>
      <itunes:episodeType>full</itunes:episodeType>
      <guid isPermaLink="false">f0c3f466-7245-47f1-9674-44c4c51127c9</guid>
      <link>https://pixelscientia.com/podcast/better-ev-batteries-with-jason-koeller-from-chemix/</link>
      <description>
        <![CDATA[<p>Batteries are arguably the most important technological innovation of the century, powering everything from mobile phones to electric vehicles (EVs). Unfortunately, most batteries have a significant impact on the environment, requiring increasingly scarce and valuable resources to manufacture and typically not designed for easy repair, reuse, or recycling.</p><p>Today on Impact AI, I'm joined by Jason Koeller, Co-Founder and CTO of Chemix, to find out how his company is leveraging AI to create better, more sustainable EV batteries that could reduce our reliance on elements like lithium, nickel, and cobalt, all without compromising vehicle performance. For a fascinating conversation with a data-driven physicist working at the intersection of software, machine learning, chemistry, and materials science, be sure to tune in today!</p><p><br></p><p><strong>Key Points:</strong></p><ul><li>Jason’s background in theoretical physics and how it led him to create Chemix.</li><li>Products and services offered by Chemix and the role that AI plays.</li><li>Four reasons that machine learning (ML) is at the core of everything Chemix does.</li><li>Unique challenges that their ML models need to contend with.</li><li>What goes into validating these models to ensure accuracy.</li><li>Why now is the right time for the technology that Chemix is developing.</li><li>Metrics for measuring the impact of a better EV battery.</li><li>Jason’s data-driven advice for leaders of AI-powered startups.</li><li>His “electrifying” vision for Chemix in the next three to five years.</li></ul><p><br></p><p><strong>Quotes:</strong></p><p>“All data analysis and decision-making is automated by our AI system. 
This includes analyzing terabytes of battery test data each day.” — Jason Koeller</p><p><br></p><p>“Looking at broad trends, [electric vehicles (EVs)] and AI have both become [things] that people have been talking a lot more about in the past 10 years and even more so in the past four or five years, and that has happened simultaneously.” — Jason Koeller</p><p><br></p><p>“Why is everyone not buying an EV? It's largely because they're too expensive or because people are worried they're not charging fast enough or they don't hold enough range for long road trips. – Improving any one of these metrics would be a measure of impact.” — Jason Koeller</p><p><br></p><p><strong>Links:</strong></p><p><a href="https://www.linkedin.com/in/jason-koeller/">Jason Koeller on LinkedIn</a></p><p><a href="https://chemix.ai/">Chemix</a></p><p><a href="https://www.linkedin.com/company/chemix-inc/">Chemix on LinkedIn</a></p><p><br><strong>Resources for Computer Vision Teams:</strong></p><p><a href="https://www.linkedin.com/in/hdcouture/">LinkedIn</a> – Connect with Heather.</p><p><a href="https://pixelscientia.com/newsletter/">Computer Vision Insights Newsletter</a> – A biweekly newsletter to help bring the latest machine learning and computer vision research to applications in people and planetary health.</p><p><a href="http://pixelscientia.com/services/strategy-session/">Computer Vision Strategy Session</a> – Not sure how to advance your computer vision project? Get unstuck with a clear set of next steps. Schedule a 1 hour strategy session now to advance your project.</p>]]>
      </description>
      <content:encoded>
        <![CDATA[<p>Batteries are arguably the most important technological innovation of the century, powering everything from mobile phones to electric vehicles (EVs). Unfortunately, most batteries have a significant impact on the environment, requiring increasingly scarce and valuable resources to manufacture and typically not designed for easy repair, reuse, or recycling.</p><p>Today on Impact AI, I'm joined by Jason Koeller, Co-Founder and CTO of Chemix, to find out how his company is leveraging AI to create better, more sustainable EV batteries that could reduce our reliance on elements like lithium, nickel, and cobalt, all without compromising vehicle performance. For a fascinating conversation with a data-driven physicist working at the intersection of software, machine learning, chemistry, and materials science, be sure to tune in today!</p><p><br></p><p><strong>Key Points:</strong></p><ul><li>Jason’s background in theoretical physics and how it led him to create Chemix.</li><li>Products and services offered by Chemix and the role that AI plays.</li><li>Four reasons that machine learning (ML) is at the core of everything Chemix does.</li><li>Unique challenges that their ML models need to contend with.</li><li>What goes into validating these models to ensure accuracy.</li><li>Why now is the right time for the technology that Chemix is developing.</li><li>Metrics for measuring the impact of a better EV battery.</li><li>Jason’s data-driven advice for leaders of AI-powered startups.</li><li>His “electrifying” vision for Chemix in the next three to five years.</li></ul><p><br></p><p><strong>Quotes:</strong></p><p>“All data analysis and decision-making is automated by our AI system. 
This includes analyzing terabytes of battery test data each day.” — Jason Koeller</p><p><br></p><p>“Looking at broad trends, [electric vehicles (EVs)] and AI have both become [things] that people have been talking a lot more about in the past 10 years and even more so in the past four or five years, and that has happened simultaneously.” — Jason Koeller</p><p><br></p><p>“Why is everyone not buying an EV? It's largely because they're too expensive or because people are worried they're not charging fast enough or they don't hold enough range for long road trips. – Improving any one of these metrics would be a measure of impact.” — Jason Koeller</p><p><br></p><p><strong>Links:</strong></p><p><a href="https://www.linkedin.com/in/jason-koeller/">Jason Koeller on LinkedIn</a></p><p><a href="https://chemix.ai/">Chemix</a></p><p><a href="https://www.linkedin.com/company/chemix-inc/">Chemix on LinkedIn</a></p><p><br><strong>Resources for Computer Vision Teams:</strong></p><p><a href="https://www.linkedin.com/in/hdcouture/">LinkedIn</a> – Connect with Heather.</p><p><a href="https://pixelscientia.com/newsletter/">Computer Vision Insights Newsletter</a> – A biweekly newsletter to help bring the latest machine learning and computer vision research to applications in people and planetary health.</p><p><a href="http://pixelscientia.com/services/strategy-session/">Computer Vision Strategy Session</a> – Not sure how to advance your computer vision project? Get unstuck with a clear set of next steps. Schedule a 1 hour strategy session now to advance your project.</p>]]>
      </content:encoded>
      <pubDate>Mon, 24 Jun 2024 06:00:00 -0400</pubDate>
      <author>Heather D. Couture</author>
      <enclosure url="https://2.gum.fm/op3.dev/e/pdcn.co/e/pdst.fm/e/dts.podtrac.com/redirect.mp3/media.transistor.fm/198bc058/e0608770.mp3" length="26490967" type="audio/mpeg"/>
      <itunes:author>Heather D. Couture</itunes:author>
      <itunes:image href="https://img.transistor.fm/RipjQPuNteb-MZyRhpVZ4P2wqbZlTRWkyZQ4KiUBIhw/rs:fill:0:0:1/w:1400/h:1400/q:60/mb:500000/aHR0cHM6Ly9pbWct/dXBsb2FkLXByb2R1/Y3Rpb24udHJhbnNp/c3Rvci5mbS84MDM1/ZTAwYzRhNjk4MDM1/ZWFlNjY3NjQ0NTgx/OTQ3Yy5qcGVn.jpg"/>
      <itunes:duration>1650</itunes:duration>
      <itunes:summary>
        <![CDATA[<p>Batteries are arguably the most important technological innovation of the century, powering everything from mobile phones to electric vehicles (EVs). Unfortunately, most batteries have a significant impact on the environment, requiring increasingly scarce and valuable resources to manufacture and typically not designed for easy repair, reuse, or recycling.</p><p>Today on Impact AI, I'm joined by Jason Koeller, Co-Founder and CTO of Chemix, to find out how his company is leveraging AI to create better, more sustainable EV batteries that could reduce our reliance on elements like lithium, nickel, and cobalt, all without compromising vehicle performance. For a fascinating conversation with a data-driven physicist working at the intersection of software, machine learning, chemistry, and materials science, be sure to tune in today!</p><p><br></p><p><strong>Key Points:</strong></p><ul><li>Jason’s background in theoretical physics and how it led him to create Chemix.</li><li>Products and services offered by Chemix and the role that AI plays.</li><li>Four reasons that machine learning (ML) is at the core of everything Chemix does.</li><li>Unique challenges that their ML models need to contend with.</li><li>What goes into validating these models to ensure accuracy.</li><li>Why now is the right time for the technology that Chemix is developing.</li><li>Metrics for measuring the impact of a better EV battery.</li><li>Jason’s data-driven advice for leaders of AI-powered startups.</li><li>His “electrifying” vision for Chemix in the next three to five years.</li></ul><p><br></p><p><strong>Quotes:</strong></p><p>“All data analysis and decision-making is automated by our AI system. 
This includes analyzing terabytes of battery test data each day.” — Jason Koeller</p><p><br></p><p>“Looking at broad trends, [electric vehicles (EVs)] and AI have both become [things] that people have been talking a lot more about in the past 10 years and even more so in the past four or five years, and that has happened simultaneously.” — Jason Koeller</p><p><br></p><p>“Why is everyone not buying an EV? It's largely because they're too expensive or because people are worried they're not charging fast enough or they don't hold enough range for long road trips. – Improving any one of these metrics would be a measure of impact.” — Jason Koeller</p><p><br></p><p><strong>Links:</strong></p><p><a href="https://www.linkedin.com/in/jason-koeller/">Jason Koeller on LinkedIn</a></p><p><a href="https://chemix.ai/">Chemix</a></p><p><a href="https://www.linkedin.com/company/chemix-inc/">Chemix on LinkedIn</a></p><p><br><strong>Resources for Computer Vision Teams:</strong></p><p><a href="https://www.linkedin.com/in/hdcouture/">LinkedIn</a> – Connect with Heather.</p><p><a href="https://pixelscientia.com/newsletter/">Computer Vision Insights Newsletter</a> – A biweekly newsletter to help bring the latest machine learning and computer vision research to applications in people and planetary health.</p><p><a href="http://pixelscientia.com/services/strategy-session/">Computer Vision Strategy Session</a> – Not sure how to advance your computer vision project? Get unstuck with a clear set of next steps. Schedule a 1 hour strategy session now to advance your project.</p>]]>
      </itunes:summary>
      <itunes:keywords>machine learning, deep learning, battery, EV</itunes:keywords>
      <itunes:explicit>No</itunes:explicit>
      <podcast:transcript url="https://share.transistor.fm/s/198bc058/transcript.txt" type="text/plain"/>
    </item>
    <item>
      <title>Personalized Cancer Treatment Decisions with Nathan Silberman from Artera</title>
      <itunes:episode>87</itunes:episode>
      <podcast:episode>87</podcast:episode>
      <itunes:title>Personalized Cancer Treatment Decisions with Nathan Silberman from Artera</itunes:title>
      <itunes:episodeType>full</itunes:episodeType>
      <guid isPermaLink="false">0c8b3472-2f2b-4cd3-b34a-f8056c1e42b8</guid>
      <link>https://pixelscientia.com/podcast/personalized-cancer-treatment-decisions-with-nathan-silberman-from-artera/</link>
      <description>
        <![CDATA[<p>Being given a cancer diagnosis is one of the worst pieces of news you can receive as a patient. This is often made even more difficult by the fact that choosing a treatment option is rarely simple or easy. Clinicians need to make multiple assessments before they can move forward, and even then it is often difficult or impossible to make unambiguous predictions. That’s where Artera comes in, a company using multimodal AI tests to provide individualized results for cancer patients, which enables clinicians and patients to make personalized treatment decisions, together.</p><p>I am joined today by Nathan Silberman, Vice President of Machine Learning and Engineering at Artera, to talk about how Artera’s technology is paving the way for personalized cancer treatment decisions. Join us today, as we get into how Artera is contributing to the cancer treatment process, some of the biggest challenges they face, and how they are addressing these through specifically trained algorithms and robust validation protocols. 
Be sure to tune in to this important conversation on how Artera is impacting cancer treatment outcomes for the better!</p><p><br></p><p><strong>Key Points:</strong></p><ul><li>Background on our guest, Nathan Silberman, and what led him to Artera.</li><li>How Artera is helping clinicians make informed decisions for cancer treatments.</li><li>The role of machine learning in their personalized risk assessments for patients.</li><li>Key challenges they’ve encountered with pathology data.</li><li>How they deal with slide variations through well-trained algorithms.</li><li>Bias in pathology data and what Artera is doing to mitigate bias.</li><li>Their partnerships with academics, clinicians, and oncologists.</li><li>Insight into the variety of approaches they use to validate their models.</li><li>How their tests fit in with clinical workflows and assist doctors and patients.</li><li>The agonizing wait time associated with traditional non-AI testing methods.</li><li>How Artera is providing quick and reliable test results.</li><li>Advice to leaders of AI-powered startups: stay focused on the ultimate goal of patient impact.</li><li>Looking ahead at Artera’s impact in the next three to five years.</li></ul><p><br></p><p><strong>Quotes:</strong></p><p>“Which therapy to choose is simply not an easy choice. Clinicians would ideally be able to accurately assess a patient's risk of a cancer spreading, or adversely affecting the patient's health in the short term. But often, that's hard or impossible for a clinician to predict.” — Nathan Silberman</p><p><br></p><p>“Clinicians have been wanting and waiting for tools that can predict whether or not a therapy will work for that particular patient. This is ultimately where Artera steps in.” — Nathan Silberman</p><p><br></p><p>“Rather than wait a month, Artera's test provides the answer within two to three days after the lab receives the biopsy slide. 
And it is so rewarding to hear from clinicians, and especially patients about the relief we can provide by giving clarity sooner.” — Nathan Silberman</p><p><br></p><p>“I think the biggest piece of advice I can give is really just making sure that you're laser-focused on the ultimate goal of patient impact.” — Nathan Silberman</p><p><br></p><p><strong>Links:</strong></p><p><a href="https://artera.ai/">Artera</a></p><p><a href="https://www.linkedin.com/in/nathan-silberman-ai/">Nathan Silberman on LinkedIn</a></p><p><br><strong>Resources for Computer Vision Teams:</strong></p><p><a href="https://www.linkedin.com/in/hdcouture/">LinkedIn</a> – Connect with Heather.</p><p><a href="https://pixelscientia.com/newsletter/">Computer Vision Insights Newsletter</a> – A biweekly newsletter to help bring the latest machine learning and computer vision research to applications in people and planetary health.</p><p><a href="http://pixelscientia.com/services/strategy-session/">Computer Vision Strategy Session</a> – Not sure how to advance your computer vision project? Get unstuck with a clear set of next steps. Schedule a 1 hour strategy session now to advance your project.</p><p><a href="https://pixelscientia.com/services/foundation-model-assessment/">Foundation Model Assessment</a> – Foundation models are popping up everywhere – do you need one for your proprietary image dataset? Get a clear perspective on whether you can benefit from a domain-specific foundation model.</p>]]>
      </description>
      <content:encoded>
        <![CDATA[<p>Being given a cancer diagnosis is one of the worst pieces of news you can receive as a patient. This is often made even more difficult by the fact that choosing a treatment option is rarely simple or easy. Clinicians need to make multiple assessments before they can move forward, and even then it is often difficult or impossible to make unambiguous predictions. That’s where Artera comes in, a company using multimodal AI tests to provide individualized results for cancer patients, which enables clinicians and patients to make personalized treatment decisions, together.</p><p>I am joined today by Nathan Silberman, Vice President of Machine Learning and Engineering at Artera, to talk about how Artera’s technology is paving the way for personalized cancer treatment decisions. Join us today, as we get into how Artera is contributing to the cancer treatment process, some of the biggest challenges they face, and how they are addressing these through specifically trained algorithms and robust validation protocols. 
Be sure to tune in to this important conversation on how Artera is impacting cancer treatment outcomes for the better!</p><p><br></p><p><strong>Key Points:</strong></p><ul><li>Background on our guest, Nathan Silberman, and what led him to Artera.</li><li>How Artera is helping clinicians make informed decisions for cancer treatments.</li><li>The role of machine learning in their personalized risk assessments for patients.</li><li>Key challenges they’ve encountered with pathology data.</li><li>How they deal with slide variations through well-trained algorithms.</li><li>Bias in pathology data and what Artera is doing to mitigate bias.</li><li>Their partnerships with academics, clinicians, and oncologists.</li><li>Insight into the variety of approaches they use to validate their models.</li><li>How their tests fit in with clinical workflows and assist doctors and patients.</li><li>The agonizing wait time associated with traditional non-AI testing methods.</li><li>How Artera is providing quick and reliable test results.</li><li>Advice to leaders of AI-powered startups: stay focused on the ultimate goal of patient impact.</li><li>Looking ahead at Artera’s impact in the next three to five years.</li></ul><p><br></p><p><strong>Quotes:</strong></p><p>“Which therapy to choose is simply not an easy choice. Clinicians would ideally be able to accurately assess a patient's risk of a cancer spreading, or adversely affecting the patient's health in the short term. But often, that's hard or impossible for a clinician to predict.” — Nathan Silberman</p><p><br></p><p>“Clinicians have been wanting and waiting for tools that can predict whether or not a therapy will work for that particular patient. This is ultimately where Artera steps in.” — Nathan Silberman</p><p><br></p><p>“Rather than wait a month, Artera's test provides the answer within two to three days after the lab receives the biopsy slide. 
And it is so rewarding to hear from clinicians, and especially patients about the relief we can provide by giving clarity sooner.” — Nathan Silberman</p><p><br></p><p>“I think the biggest piece of advice I can give is really just making sure that you're laser-focused on the ultimate goal of patient impact.” — Nathan Silberman</p><p><br></p><p><strong>Links:</strong></p><p><a href="https://artera.ai/">Artera</a></p><p><a href="https://www.linkedin.com/in/nathan-silberman-ai/">Nathan Silberman on LinkedIn</a></p><p><br><strong>Resources for Computer Vision Teams:</strong></p><p><a href="https://www.linkedin.com/in/hdcouture/">LinkedIn</a> – Connect with Heather.</p><p><a href="https://pixelscientia.com/newsletter/">Computer Vision Insights Newsletter</a> – A biweekly newsletter to help bring the latest machine learning and computer vision research to applications in people and planetary health.</p><p><a href="http://pixelscientia.com/services/strategy-session/">Computer Vision Strategy Session</a> – Not sure how to advance your computer vision project? Get unstuck with a clear set of next steps. Schedule a 1 hour strategy session now to advance your project.</p><p><a href="https://pixelscientia.com/services/foundation-model-assessment/">Foundation Model Assessment</a> – Foundation models are popping up everywhere – do you need one for your proprietary image dataset? Get a clear perspective on whether you can benefit from a domain-specific foundation model.</p>]]>
      </content:encoded>
      <pubDate>Mon, 17 Jun 2024 06:00:00 -0400</pubDate>
      <author>Heather D. Couture</author>
      <enclosure url="https://2.gum.fm/op3.dev/e/pdcn.co/e/pdst.fm/e/dts.podtrac.com/redirect.mp3/media.transistor.fm/4b7743fc/6e231860.mp3" length="24743005" type="audio/mpeg"/>
      <itunes:author>Heather D. Couture</itunes:author>
      <itunes:image href="https://img.transistor.fm/WqILOZpyMd51ZtatoRxE4E4VHpsCW7CmaUAuNrVFSPE/rs:fill:0:0:1/w:1400/h:1400/q:60/mb:500000/aHR0cHM6Ly9pbWct/dXBsb2FkLXByb2R1/Y3Rpb24udHJhbnNp/c3Rvci5mbS9iOWE3/MTJlMDRhMTI5NjFk/YTFhMGJhZWVlYjIy/ZTU3MC5qcGVn.jpg"/>
      <itunes:duration>1030</itunes:duration>
      <itunes:summary>
        <![CDATA[<p>Being given a cancer diagnosis is one of the worst pieces of news you can receive as a patient. This is often made even more difficult by the fact that choosing a treatment option is rarely simple or easy. Clinicians need to make multiple assessments before they can move forward, and even then it is often difficult or impossible to make unambiguous predictions. That’s where Artera comes in, a company using multimodal AI tests to provide individualized results for cancer patients, which enables clinicians and patients to make personalized treatment decisions, together.</p><p>I am joined today by Nathan Silberman, Vice President of Machine Learning and Engineering at Artera, to talk about how Artera’s technology is paving the way for personalized cancer treatment decisions. Join us today, as we get into how Artera is contributing to the cancer treatment process, some of the biggest challenges they face, and how they are addressing these through specifically trained algorithms and robust validation protocols. 
Be sure to tune in to this important conversation on how Artera is impacting cancer treatment outcomes for the better!</p><p><br></p><p><strong>Key Points:</strong></p><ul><li>Background on our guest, Nathan Silberman, and what led him to Artera.</li><li>How Artera is helping clinicians make informed decisions for cancer treatments.</li><li>The role of machine learning in their personalized risk assessments for patients.</li><li>Key challenges they’ve encountered with pathology data.</li><li>How they deal with slide variations through well-trained algorithms.</li><li>Bias in pathology data and what Artera is doing to mitigate bias.</li><li>Their partnerships with academics, clinicians, and oncologists.</li><li>Insight into the variety of approaches they use to validate their models.</li><li>How their tests fit in with clinical workflows and assist doctors and patients.</li><li>The agonizing wait time associated with traditional non-AI testing methods.</li><li>How Artera is providing quick and reliable test results.</li><li>Advice to leaders of AI-powered startups: stay focused on the ultimate goal of patient impact.</li><li>Looking ahead at Artera’s impact in the next three to five years.</li></ul><p><br></p><p><strong>Quotes:</strong></p><p>“Which therapy to choose is simply not an easy choice. Clinicians would ideally be able to accurately assess a patient's risk of a cancer spreading, or adversely affecting the patient's health in the short term. But often, that's hard or impossible for a clinician to predict.” — Nathan Silberman</p><p><br></p><p>“Clinicians have been wanting and waiting for tools that can predict whether or not a therapy will work for that particular patient. This is ultimately where Artera steps in.” — Nathan Silberman</p><p><br></p><p>“Rather than wait a month, Artera's test provides the answer within two to three days after the lab receives the biopsy slide. 
And it is so rewarding to hear from clinicians, and especially patients about the relief we can provide by giving clarity sooner.” — Nathan Silberman</p><p><br></p><p>“I think the biggest piece of advice I can give is really just making sure that you're laser-focused on the ultimate goal of patient impact.” — Nathan Silberman</p><p><br></p><p><strong>Links:</strong></p><p><a href="https://artera.ai/">Artera</a></p><p><a href="https://www.linkedin.com/in/nathan-silberman-ai/">Nathan Silberman on LinkedIn</a></p><p><br><strong>Resources for Computer Vision Teams:</strong></p><p><a href="https://www.linkedin.com/in/hdcouture/">LinkedIn</a> – Connect with Heather.</p><p><a href="https://pixelscientia.com/newsletter/">Computer Vision Insights Newsletter</a> – A biweekly newsletter to help bring the latest machine learning and computer vision research to applications in people and planetary health.</p><p><a href="http://pixelscientia.com/services/strategy-session/">Computer Vision Strategy Session</a> – Not sure how to advance your computer vision project? Get unstuck with a clear set of next steps. Schedule a 1 hour strategy session now to advance your project.</p><p><a href="https://pixelscientia.com/services/foundation-model-assessment/">Foundation Model Assessment</a> – Foundation models are popping up everywhere – do you need one for your proprietary image dataset? Get a clear perspective on whether you can benefit from a domain-specific foundation model.</p>]]>
      </itunes:summary>
      <itunes:keywords>machine learning, computational pathology, precision medicine, medical imaging</itunes:keywords>
      <itunes:explicit>No</itunes:explicit>
      <podcast:transcript url="https://share.transistor.fm/s/4b7743fc/transcript.txt" type="text/plain"/>
    </item>
    <item>
      <title>Faster Object Search with Corey Jaskolski from Synthetaic</title>
      <itunes:episode>86</itunes:episode>
      <podcast:episode>86</podcast:episode>
      <itunes:title>Faster Object Search with Corey Jaskolski from Synthetaic</itunes:title>
      <itunes:episodeType>full</itunes:episodeType>
      <guid isPermaLink="false">7752ff7a-68ac-4906-a5f3-146c97fb8843</guid>
      <link>https://pixelscientia.com/podcast/faster-object-search-with-corey-jaskolski-from-synthetaic/</link>
      <description>
        <![CDATA[<p>What if there was a way to revolutionize image-based AI, eliminating the need for extensive prework? In this episode, I sit down with Corey Jaskolski, Founder and President of Synthetaic, to talk about finding objects in images and video quickly. Synthetaic is redefining the landscape of data analysis with its groundbreaking technology that eliminates the need for time-consuming human labeling or pre-built models. It specializes in the rapid analysis of large, unlabeled video and image datasets.</p><p>In our conversation, we delve into the groundbreaking technology behind Synthetaic's flagship product and how it is revolutionizing image and video processing. Explore how it utilizes an unsupervised backend to swiftly analyze and interpret data, how it is able to work with any kind of image data, and the process behind ingesting and embedding image objects. Discover how Synthetaic navigates biased data and leverages domain expertise to ensure accurate and ethical AI solutions. Gain insights into the gaps holding AI’s application to images back, the different ways the company’s technology can be applied, the future development of Synthetaic, and more!</p><p><br></p><p><strong>Key Points:</strong></p><ul><li>Corey’s background in AI and ML and what led to the creation of Synthetaic.</li><li>Why Synthetaic focuses on processing images and videos quickly.</li><li>How the company leverages ML in its approach. </li><li>Details about image ingestion and embedding processes.</li><li>How the definition of potential objects varies depending on the type of imagery used.</li><li>Explore the role of domain expertise in addressing challenges. </li><li>Hear examples of the technology’s diverse range of applications.</li><li>Recommendations to leaders of AI-powered startups. 
</li><li>His hope for the future trajectory of Synthetaic.</li></ul><p><br></p><p><strong>Quotes:</strong></p><p>“We think about the machine learning problems a little bit differently, because we're not labeling data to go ahead and build a bespoke frozen traditional AI model.” — Corey Jaskolski</p><p><br></p><p>“We take this very broad view of objects where anything that could be discrete from anything else in the imagery gets called an object, at the risk of basically finding, if you will, too many objects.” — Corey Jaskolski</p><p><br></p><p>“We think of RAIC as something that solves the cold start problem really well.” — Corey Jaskolski</p><p><br></p><p>“By and large, we're training image and video-based AIs the same way. We need a paradigm shift that really allows AI to be the force multiplier that it can be.” — Corey Jaskolski</p><p><br></p><p><strong>Links:</strong></p><p><a href="https://www.linkedin.com/in/coreyjaskolski/">Corey Jaskolski on LinkedIn</a></p><p><a href="https://twitter.com/coreyjaskolski">Corey Jaskolski on X</a></p><p><a href="https://www.synthetaic.com/">Synthetaic</a></p><p><br><strong>Resources for Computer Vision Teams:</strong></p><p><a href="https://www.linkedin.com/in/hdcouture/">LinkedIn</a> – Connect with Heather.</p><p><a href="https://pixelscientia.com/newsletter/">Computer Vision Insights Newsletter</a> – A biweekly newsletter to help bring the latest machine learning and computer vision research to applications in people and planetary health.</p><p><a href="http://pixelscientia.com/services/strategy-session/">Computer Vision Strategy Session</a> – Not sure how to advance your computer vision project? Get unstuck with a clear set of next steps. Schedule a 1 hour strategy session now to advance your project.</p><p><a href="https://pixelscientia.com/services/foundation-model-assessment/">Foundation Model Assessment</a> – Foundation models are popping up everywhere – do you need one for your proprietary image dataset? 
Get a clear perspective on whether you can benefit from a domain-specific foundation model.</p>]]>
      </description>
      <content:encoded>
        <![CDATA[<p>What if there was a way to revolutionize image-based AI, eliminating the need for extensive prework? In this episode, I sit down with Corey Jaskolski, Founder and President of Synthetaic, to talk about finding objects in images and video quickly. Synthetaic is redefining the landscape of data analysis with its groundbreaking technology that eliminates the need for time-consuming human labeling or pre-built models. It specializes in the rapid analysis of large, unlabeled video and image datasets.</p><p>In our conversation, we delve into the groundbreaking technology behind Synthetaic's flagship product and how it is revolutionizing image and video processing. Explore how it utilizes an unsupervised backend to swiftly analyze and interpret data, how it is able to work with any kind of image data, and the process behind ingesting and embedding image objects. Discover how Synthetaic navigates biased data and leverages domain expertise to ensure accurate and ethical AI solutions. Gain insights into the gaps holding AI’s application to images back, the different ways the company’s technology can be applied, the future development of Synthetaic, and more!</p><p><br></p><p><strong>Key Points:</strong></p><ul><li>Corey’s background in AI and ML and what led to the creation of Synthetaic.</li><li>Why Synthetaic focuses on processing images and videos quickly.</li><li>How the company leverages ML in its approach. </li><li>Details about image ingestion and embedding processes.</li><li>How the definition of potential objects varies depending on the type of imagery used.</li><li>Explore the role of domain expertise in addressing challenges. </li><li>Hear examples of the technology’s diverse range of applications.</li><li>Recommendations to leaders of AI-powered startups. 
</li><li>His hope for the future trajectory of Synthetaic.</li></ul><p><br></p><p><strong>Quotes:</strong></p><p>“We think about the machine learning problems a little bit differently, because we're not labeling data to go ahead and build a bespoke frozen traditional AI model.” — Corey Jaskolski</p><p><br></p><p>“We take this very broad view of objects where anything that could be discrete from anything else in the imagery gets called an object, at the risk of basically finding, if you will, too many objects.” — Corey Jaskolski</p><p><br></p><p>“We think of RAIC as something that solves the cold start problem really well.” — Corey Jaskolski</p><p><br></p><p>“By and large, we're training image and video-based AIs the same way. We need a paradigm shift that really allows AI to be the force multiplier that it can be.” — Corey Jaskolski</p><p><br></p><p><strong>Links:</strong></p><p><a href="https://www.linkedin.com/in/coreyjaskolski/">Corey Jaskolski on LinkedIn</a></p><p><a href="https://twitter.com/coreyjaskolski">Corey Jaskolski on X</a></p><p><a href="https://www.synthetaic.com/">Synthetaic</a></p><p><br><strong>Resources for Computer Vision Teams:</strong></p><p><a href="https://www.linkedin.com/in/hdcouture/">LinkedIn</a> – Connect with Heather.</p><p><a href="https://pixelscientia.com/newsletter/">Computer Vision Insights Newsletter</a> – A biweekly newsletter to help bring the latest machine learning and computer vision research to applications in people and planetary health.</p><p><a href="http://pixelscientia.com/services/strategy-session/">Computer Vision Strategy Session</a> – Not sure how to advance your computer vision project? Get unstuck with a clear set of next steps. Schedule a 1 hour strategy session now to advance your project.</p><p><a href="https://pixelscientia.com/services/foundation-model-assessment/">Foundation Model Assessment</a> – Foundation models are popping up everywhere – do you need one for your proprietary image dataset? 
Get a clear perspective on whether you can benefit from a domain-specific foundation model.</p>]]>
      </content:encoded>
      <pubDate>Mon, 10 Jun 2024 06:00:00 -0400</pubDate>
      <author>Heather D. Couture</author>
      <enclosure url="https://2.gum.fm/op3.dev/e/pdcn.co/e/pdst.fm/e/dts.podtrac.com/redirect.mp3/media.transistor.fm/d59492b9/f90a4d65.mp3" length="39271021" type="audio/mpeg"/>
      <itunes:author>Heather D. Couture</itunes:author>
      <itunes:image href="https://img.transistor.fm/n-CgxQgx9eyxm_h9pHw0sG-eKA_l5QRKqz93HwIa6OQ/rs:fill:0:0:1/w:1400/h:1400/q:60/mb:500000/aHR0cHM6Ly9pbWct/dXBsb2FkLXByb2R1/Y3Rpb24udHJhbnNp/c3Rvci5mbS8xMDJl/YTFlMjRlZjlhMDZl/NTkzNGJlMzgxYTRl/OWQ3YS5qcGVn.jpg"/>
      <itunes:duration>1632</itunes:duration>
      <itunes:summary>
        <![CDATA[<p>What if there was a way to revolutionize image-based AI, eliminating the need for extensive prework? In this episode, I sit down with Corey Jaskolski, Founder and President of Synthetaic, to talk about finding objects in images and video quickly. Synthetaic is redefining the landscape of data analysis with its groundbreaking technology that eliminates the need for time-consuming human labeling or pre-built models. It specializes in the rapid analysis of large, unlabeled video and image datasets.</p><p>In our conversation, we delve into the groundbreaking technology behind Synthetaic's flagship product and how it is revolutionizing image and video processing. Explore how it utilizes an unsupervised backend to swiftly analyze and interpret data, how it is able to work with any kind of image data, and the process behind ingesting and embedding image objects. Discover how Synthetaic navigates biased data and leverages domain expertise to ensure accurate and ethical AI solutions. Gain insights into the gaps holding AI’s application to images back, the different ways the company’s technology can be applied, the future development of Synthetaic, and more!</p><p><br></p><p><strong>Key Points:</strong></p><ul><li>Corey’s background in AI and ML and what led to the creation of Synthetaic.</li><li>Why Synthetaic focuses on processing images and videos quickly.</li><li>How the company leverages ML in its approach. </li><li>Details about image ingestion and embedding processes.</li><li>How the definition of potential objects varies depending on the type of imagery used.</li><li>Explore the role of domain expertise in addressing challenges. </li><li>Hear examples of the technology’s diverse range of applications.</li><li>Recommendations to leaders of AI-powered startups. 
</li><li>His hope for the future trajectory of Synthetaic.</li></ul><p><br></p><p><strong>Quotes:</strong></p><p>“We think about the machine learning problems a little bit differently, because we're not labeling data to go ahead and build a bespoke frozen traditional AI model.” — Corey Jaskolski</p><p><br></p><p>“We take this very broad view of objects where anything that could be discrete from anything else in the imagery gets called an object, at the risk of basically finding, if you will, too many objects.” — Corey Jaskolski</p><p><br></p><p>“We think of RAIC as something that solves the cold start problem really well.” — Corey Jaskolski</p><p><br></p><p>“By and large, we're training image and video-based AIs the same way. We need a paradigm shift that really allows AI to be the force multiplier that it can be.” — Corey Jaskolski</p><p><br></p><p><strong>Links:</strong></p><p><a href="https://www.linkedin.com/in/coreyjaskolski/">Corey Jaskolski on LinkedIn</a></p><p><a href="https://twitter.com/coreyjaskolski">Corey Jaskolski on X</a></p><p><a href="https://www.synthetaic.com/">Synthetaic</a></p><p><br><strong>Resources for Computer Vision Teams:</strong></p><p><a href="https://www.linkedin.com/in/hdcouture/">LinkedIn</a> – Connect with Heather.</p><p><a href="https://pixelscientia.com/newsletter/">Computer Vision Insights Newsletter</a> – A biweekly newsletter to help bring the latest machine learning and computer vision research to applications in people and planetary health.</p><p><a href="http://pixelscientia.com/services/strategy-session/">Computer Vision Strategy Session</a> – Not sure how to advance your computer vision project? Get unstuck with a clear set of next steps. Schedule a 1 hour strategy session now to advance your project.</p><p><a href="https://pixelscientia.com/services/foundation-model-assessment/">Foundation Model Assessment</a> – Foundation models are popping up everywhere – do you need one for your proprietary image dataset? 
Get a clear perspective on whether you can benefit from a domain-specific foundation model.</p>]]>
      </itunes:summary>
      <itunes:keywords>machine learning, deep learning, object detection, image search</itunes:keywords>
      <itunes:explicit>No</itunes:explicit>
      <podcast:transcript url="https://share.transistor.fm/s/d59492b9/transcript.txt" type="text/plain"/>
    </item>
    <item>
      <title>Digital Twins for Clinical Trials with Charles Fisher from Unlearn AI</title>
      <itunes:episode>85</itunes:episode>
      <podcast:episode>85</podcast:episode>
      <itunes:title>Digital Twins for Clinical Trials with Charles Fisher from Unlearn AI</itunes:title>
      <itunes:episodeType>full</itunes:episodeType>
      <guid isPermaLink="false">d77b10b3-882e-4c44-8cb6-929a731290f0</guid>
      <link>https://pixelscientia.com/podcast/digital-twins-for-clinical-trials-with-charles-fisher-from-unlearn-ai/</link>
      <description>
        <![CDATA[<p>What if AI could improve the outcomes of clinical trials by making them more efficient and reducing the number of patients receiving placebos? Well, today’s guest, Charles Fisher is here to tell us all about how his company, Unlearn AI, is creating digital twins to do just that! In this conversation, you’ll hear all about Charles' academic background, what made him decide to create Unlearn AI, what the company does, and how they work within clinical trials. We delve into the problems they focus on and the data they collect before Charles tells us about their zero-trust solution. We even discuss Charles’ opinions of how domain knowledge should be used in machine learning. Finally, our guest shares advice for leaders of AI-powered startups. To hear all this and even find out what to expect from Unlearn in the near future, tune in now!</p><p><br></p><p><strong>Key Points:</strong></p><ul><li>A rundown of Charles Fisher’s background and what led him to create Unlearn AI. </li><li>What Unlearn does, what digital twins are, and why they’re important. </li><li>How clinical trials work and how they are used within Unlearn. </li><li>The kinds of data they use and how they tackle these clinical trials using machine learning. </li><li>What a zero-trust solution is and how Unlearn guarantees that their results are accurate. </li><li>Charles shares his thoughts on the role of domain expertise in machine learning. </li><li>His advice for any leaders of AI-powered startups. </li><li>What we can expect from Unlearn in the next three to five years. 
</li></ul><p><br></p><p><strong>Quotes:</strong></p><p>“[Unlearn is] typically working on running clinical trials where we might be able to reduce the number of patients who get the placebo by somewhere like – 50%.” — Charles Fisher</p><p><br></p><p>“[Unlearn] can prove that these studies produce the right answer, even though they leverage these AI algorithms.” — Charles Fisher</p><p><br></p><p>“It's very difficult to find examples where you can actually have a zero-trust application of AI. I actually don't know of another one besides [Unlearn’s].” — Charles Fisher</p><p><br></p><p><strong>Links:</strong></p><p><a href="https://www.linkedin.com/in/charleskfisher/">Charles Fisher on LinkedIn</a></p><p><a href="https://twitter.com/charleskfisher">Charles Fisher on X</a></p><p><a href="https://www.unlearn.ai/">Unlearn AI</a></p><p><br><strong>Resources for Computer Vision Teams:</strong></p><p><a href="https://www.linkedin.com/in/hdcouture/">LinkedIn</a> – Connect with Heather.</p><p><a href="https://pixelscientia.com/newsletter/">Computer Vision Insights Newsletter</a> – A biweekly newsletter to help bring the latest machine learning and computer vision research to applications in people and planetary health.</p><p><a href="http://pixelscientia.com/services/strategy-session/">Computer Vision Strategy Session</a> – Not sure how to advance your computer vision project? Get unstuck with a clear set of next steps. Schedule a 1 hour strategy session now to advance your project.</p><p><a href="https://pixelscientia.com/services/foundation-model-assessment/">Foundation Model Assessment</a> – Foundation models are popping up everywhere – do you need one for your proprietary image dataset? Get a clear perspective on whether you can benefit from a domain-specific foundation model.</p>]]>
      </description>
      <content:encoded>
        <![CDATA[<p>What if AI could improve the outcomes of clinical trials by making them more efficient and reducing the number of patients receiving placebos? Well, today’s guest, Charles Fisher is here to tell us all about how his company, Unlearn AI, is creating digital twins to do just that! In this conversation, you’ll hear all about Charles’ academic background, what made him decide to create Unlearn AI, what the company does, and how they work within clinical trials. We delve into the problems they focus on and the data they collect before Charles tells us about their zero-trust solution. We even discuss Charles’ opinions of how domain knowledge should be used in machine learning. Finally, our guest shares advice for leaders of AI-powered startups. To hear all this and even find out what to expect from Unlearn in the near future, tune in now!</p><p><br></p><p><strong>Key Points:</strong></p><ul><li>A rundown of Charles Fisher’s background and what led him to create Unlearn AI. </li><li>What Unlearn does, what digital twins are, and why they’re important. </li><li>How clinical trials work and how they are used within Unlearn. </li><li>The kinds of data they use and how they tackle these clinical trials using machine learning. </li><li>What a zero-trust solution is and how Unlearn guarantees that their results are accurate. </li><li>Charles shares his thoughts on the role of domain expertise in machine learning. </li><li>His advice for any leaders of AI-powered startups. </li><li>What we can expect from Unlearn in the next three to five years. 
</li></ul><p><br></p><p><strong>Quotes:</strong></p><p>“[Unlearn is] typically working on running clinical trials where we might be able to reduce the number of patients who get the placebo by somewhere like – 50%.” — Charles Fisher</p><p><br></p><p>“[Unlearn] can prove that these studies produce the right answer, even though they leverage these AI algorithms.” — Charles Fisher</p><p><br></p><p>“It's very difficult to find examples where you can actually have a zero-trust application of AI. I actually don't know of another one besides [Unlearn’s].” — Charles Fisher</p><p><br></p><p><strong>Links:</strong></p><p><a href="https://www.linkedin.com/in/charleskfisher/">Charles Fisher on LinkedIn</a></p><p><a href="https://twitter.com/charleskfisher">Charles Fisher on X</a></p><p><a href="https://www.unlearn.ai/">Unlearn AI</a></p><p><br><strong>Resources for Computer Vision Teams:</strong></p><p><a href="https://www.linkedin.com/in/hdcouture/">LinkedIn</a> – Connect with Heather.</p><p><a href="https://pixelscientia.com/newsletter/">Computer Vision Insights Newsletter</a> – A biweekly newsletter to help bring the latest machine learning and computer vision research to applications in people and planetary health.</p><p><a href="http://pixelscientia.com/services/strategy-session/">Computer Vision Strategy Session</a> – Not sure how to advance your computer vision project? Get unstuck with a clear set of next steps. Schedule a 1 hour strategy session now to advance your project.</p><p><a href="https://pixelscientia.com/services/foundation-model-assessment/">Foundation Model Assessment</a> – Foundation models are popping up everywhere – do you need one for your proprietary image dataset? Get a clear perspective on whether you can benefit from a domain-specific foundation model.</p>]]>
      </content:encoded>
      <pubDate>Mon, 03 Jun 2024 06:00:00 -0400</pubDate>
      <author>Heather D. Couture</author>
      <enclosure url="https://2.gum.fm/op3.dev/e/pdcn.co/e/pdst.fm/e/dts.podtrac.com/redirect.mp3/media.transistor.fm/c53ff75f/5fd98c21.mp3" length="29178878" type="audio/mpeg"/>
      <itunes:author>Heather D. Couture</itunes:author>
      <itunes:image href="https://img.transistor.fm/gb2yp6mXv3fwEuNImhkgkj47kR7a_en2a3e1TFD1qf8/rs:fill:0:0:1/w:1400/h:1400/q:60/mb:500000/aHR0cHM6Ly9pbWct/dXBsb2FkLXByb2R1/Y3Rpb24udHJhbnNp/c3Rvci5mbS9mZDAw/YTU3NDk3ZjEyZjBh/ZjdlZjM4M2RkN2Mx/YWJlZC5qcGVn.jpg"/>
      <itunes:duration>1819</itunes:duration>
      <itunes:summary>
        <![CDATA[<p>What if AI could improve the outcomes of clinical trials by making them more efficient and reducing the number of patients receiving placebos? Well, today’s guest, Charles Fisher is here to tell us all about how his company, Unlearn AI, is creating digital twins to do just that! In this conversation, you’ll hear all about Charles’ academic background, what made him decide to create Unlearn AI, what the company does, and how they work within clinical trials. We delve into the problems they focus on and the data they collect before Charles tells us about their zero-trust solution. We even discuss Charles’ opinions of how domain knowledge should be used in machine learning. Finally, our guest shares advice for leaders of AI-powered startups. To hear all this and even find out what to expect from Unlearn in the near future, tune in now!</p><p><br></p><p><strong>Key Points:</strong></p><ul><li>A rundown of Charles Fisher’s background and what led him to create Unlearn AI. </li><li>What Unlearn does, what digital twins are, and why they’re important. </li><li>How clinical trials work and how they are used within Unlearn. </li><li>The kinds of data they use and how they tackle these clinical trials using machine learning. </li><li>What a zero-trust solution is and how Unlearn guarantees that their results are accurate. </li><li>Charles shares his thoughts on the role of domain expertise in machine learning. </li><li>His advice for any leaders of AI-powered startups. </li><li>What we can expect from Unlearn in the next three to five years. 
</li></ul><p><br></p><p><strong>Quotes:</strong></p><p>“[Unlearn is] typically working on running clinical trials where we might be able to reduce the number of patients who get the placebo by somewhere like – 50%.” — Charles Fisher</p><p><br></p><p>“[Unlearn] can prove that these studies produce the right answer, even though they leverage these AI algorithms.” — Charles Fisher</p><p><br></p><p>“It's very difficult to find examples where you can actually have a zero-trust application of AI. I actually don't know of another one besides [Unlearn’s].” — Charles Fisher</p><p><br></p><p><strong>Links:</strong></p><p><a href="https://www.linkedin.com/in/charleskfisher/">Charles Fisher on LinkedIn</a></p><p><a href="https://twitter.com/charleskfisher">Charles Fisher on X</a></p><p><a href="https://www.unlearn.ai/">Unlearn AI</a></p><p><br><strong>Resources for Computer Vision Teams:</strong></p><p><a href="https://www.linkedin.com/in/hdcouture/">LinkedIn</a> – Connect with Heather.</p><p><a href="https://pixelscientia.com/newsletter/">Computer Vision Insights Newsletter</a> – A biweekly newsletter to help bring the latest machine learning and computer vision research to applications in people and planetary health.</p><p><a href="http://pixelscientia.com/services/strategy-session/">Computer Vision Strategy Session</a> – Not sure how to advance your computer vision project? Get unstuck with a clear set of next steps. Schedule a 1 hour strategy session now to advance your project.</p><p><a href="https://pixelscientia.com/services/foundation-model-assessment/">Foundation Model Assessment</a> – Foundation models are popping up everywhere – do you need one for your proprietary image dataset? Get a clear perspective on whether you can benefit from a domain-specific foundation model.</p>]]>
      </itunes:summary>
      <itunes:keywords>machine learning, clinical trial, digital twin</itunes:keywords>
      <itunes:explicit>No</itunes:explicit>
      <podcast:transcript url="https://share.transistor.fm/s/c53ff75f/transcript.txt" type="text/plain"/>
    </item>
    <item>
      <title>Cutting Carbon in Concrete with Mathieu Bauchy from Concrete.ai</title>
      <itunes:episode>84</itunes:episode>
      <podcast:episode>84</podcast:episode>
      <itunes:title>Cutting Carbon in Concrete with Mathieu Bauchy from Concrete.ai</itunes:title>
      <itunes:episodeType>full</itunes:episodeType>
      <guid isPermaLink="false">cda934c3-aac2-49b6-b582-3acca11494b1</guid>
      <link>https://pixelscientia.com/podcast/cutting-carbon-in-concrete-with-mathieu-bauchy-from-concrete-ai/</link>
      <description>
        <![CDATA[<p>Did you know that concrete is the second most-used material in the world after water? Although it has largely defined modern society, concrete has a hidden climate cost: it is responsible for 1.6 billion tons of carbon dioxide entering the atmosphere annually. For context, that’s more than the entire aviation industry! With these statistics in mind, today’s guest is on a mission to decarbonize the construction industry. As the CTO and co-founder of cleantech startup, Concrete.ai, Mathieu Bauchy is using his expertise in artificial intelligence and materials modeling to prescribe new concrete formulations that are less carbon-intensive and more economical. Today, Mathieu joins me to offer insight into Concrete.ai’s exciting technology, why it’s important for the planet, and how it can reduce concrete emissions by a third while also ensuring that concrete producers maximize margins and streamline their supply chains. To find out how this is possible without any changes to the raw materials, no modification of the production process, and no cost premium, be sure to tune in today!</p><p><br></p><p><strong>Key Points:</strong></p><ul><li>Insight into Mathieu’s research focus and how it led him to create Concrete.ai.</li><li>What Concrete.ai does and why it’s important for reducing CO2 emissions.</li><li>The role of machine learning, particularly generative AI, in this technology.</li><li>How Concrete.ai develops ML models that are reliably able to extrapolate.</li><li>Why estimating uncertainty is important and how Concrete.ai approaches it.</li><li>What goes into validating these models, including systematic testing in the field.</li><li>Reasons that the timing for Concrete.ai’s technology is critical.</li><li>Dollars saved and other metrics for measuring the impact of this technology.</li><li>Mathieu’s humanity-focused advice for other leaders of AI-powered startups.</li><li>How Concrete.ai’s impact will continue to expand and 
evolve.</li></ul><p><br></p><p><strong>Quotes:</strong></p><p>“Concrete is responsible for 8% of the total CO2 emissions in the world. To give you some context, that's about three times more emissions than the entire aviation industry.” — Mathieu Bauchy</p><p><br></p><p>“We think that it's the right time for the concrete industry to benefit from what AI can offer to avoid waste during the production of concrete. The idea is that, if we adopt these new technologies, then we can continue to improve our quality of life.” — Mathieu Bauchy</p><p><br></p><p>“It's not like we are changing the way concrete is made. It's still made in the same plant. It's still made using the same materials. We are just changing the recipe, and just that [can] save about a third of the emissions of concrete.” — Mathieu Bauchy</p><p><br></p><p>“AI also comes with its own carbon footprint and, to some extent, also contributes to climate change. We should think about how we use AI to solve climate change and not further contribute to it.” — Mathieu Bauchy</p><p><br></p><p><strong>Links:</strong></p><p><a href="https://www.concrete.ai/">Concrete.ai</a><br><a href="https://www.linkedin.com/company/concrete-ai/">Concrete.ai on LinkedIn</a></p><p><a href="https://samueli.ucla.edu/people/mathieu-bauchy/">Mathieu Bauchy</a></p><p><a href="https://www.linkedin.com/in/bauchy/">Mathieu Bauchy on LinkedIn</a></p><p><a href="https://www.youtube.com/@MBauchy">Mathieu Bauchy on YouTube</a></p><p><a href="https://twitter.com/Moutmat">Mathieu Bauchy on X</a></p><p><br><strong>Resources for Computer Vision Teams:</strong></p><p><a href="https://www.linkedin.com/in/hdcouture/">LinkedIn</a> – Connect with Heather.</p><p><a href="https://pixelscientia.com/newsletter/">Computer Vision Insights Newsletter</a> – A biweekly newsletter to help bring the latest machine learning and computer vision research to applications in people and planetary health.</p><p><a 
href="http://pixelscientia.com/services/strategy-session/">Computer Vision Strategy Session</a> – Not sure how to advance your computer vision project? Get unstuck with a clear set of next steps. Schedule a 1 hour strategy session now to advance your project.</p><p><a href="https://pixelscientia.com/services/foundation-model-assessment/">Foundation Model Assessment</a> – Foundation models are popping up everywhere – do you need one for your proprietary image dataset? Get a clear perspective on whether you can benefit from a domain-specific foundation model.</p>]]>
      </description>
      <content:encoded>
        <![CDATA[<p>Did you know that concrete is the second most-used material in the world after water? Although it has largely defined modern society, concrete has a hidden climate cost: it is responsible for 1.6 billion tons of carbon dioxide entering the atmosphere annually. For context, that’s more than the entire aviation industry! With these statistics in mind, today’s guest is on a mission to decarbonize the construction industry. As the CTO and co-founder of cleantech startup, Concrete.ai, Mathieu Bauchy is using his expertise in artificial intelligence and materials modeling to prescribe new concrete formulations that are less carbon-intensive and more economical. Today, Mathieu joins me to offer insight into Concrete.ai’s exciting technology, why it’s important for the planet, and how it can reduce concrete emissions by a third while also ensuring that concrete producers maximize margins and streamline their supply chains. To find out how this is possible without any changes to the raw materials, no modification of the production process, and no cost premium, be sure to tune in today!</p><p><br></p><p><strong>Key Points:</strong></p><ul><li>Insight into Mathieu’s research focus and how it led him to create Concrete.ai.</li><li>What Concrete.ai does and why it’s important for reducing CO2 emissions.</li><li>The role of machine learning, particularly generative AI, in this technology.</li><li>How Concrete.ai develops ML models that are reliably able to extrapolate.</li><li>Why estimating uncertainty is important and how Concrete.ai approaches it.</li><li>What goes into validating these models, including systematic testing in the field.</li><li>Reasons that the timing for Concrete.ai’s technology is critical.</li><li>Dollars saved and other metrics for measuring the impact of this technology.</li><li>Mathieu’s humanity-focused advice for other leaders of AI-powered startups.</li><li>How Concrete.ai’s impact will continue to expand and 
evolve.</li></ul><p><br></p><p><strong>Quotes:</strong></p><p>“Concrete is responsible for 8% of the total CO2 emissions in the world. To give you some context, that's about three times more emissions than the entire aviation industry.” — Mathieu Bauchy</p><p><br></p><p>“We think that it's the right time for the concrete industry to benefit from what AI can offer to avoid waste during the production of concrete. The idea is that, if we adopt these new technologies, then we can continue to improve our quality of life.” — Mathieu Bauchy</p><p><br></p><p>“It's not like we are changing the way concrete is made. It's still made in the same plant. It's still made using the same materials. We are just changing the recipe, and just that [can] save about a third of the emissions of concrete.” — Mathieu Bauchy</p><p><br></p><p>“AI also comes with its own carbon footprint and, to some extent, also contributes to climate change. We should think about how we use AI to solve climate change and not further contribute to it.” — Mathieu Bauchy</p><p><br></p><p><strong>Links:</strong></p><p><a href="https://www.concrete.ai/">Concrete.ai</a><br><a href="https://www.linkedin.com/company/concrete-ai/">Concrete.ai on LinkedIn</a></p><p><a href="https://samueli.ucla.edu/people/mathieu-bauchy/">Mathieu Bauchy</a></p><p><a href="https://www.linkedin.com/in/bauchy/">Mathieu Bauchy on LinkedIn</a></p><p><a href="https://www.youtube.com/@MBauchy">Mathieu Bauchy on YouTube</a></p><p><a href="https://twitter.com/Moutmat">Mathieu Bauchy on X</a></p><p><br><strong>Resources for Computer Vision Teams:</strong></p><p><a href="https://www.linkedin.com/in/hdcouture/">LinkedIn</a> – Connect with Heather.</p><p><a href="https://pixelscientia.com/newsletter/">Computer Vision Insights Newsletter</a> – A biweekly newsletter to help bring the latest machine learning and computer vision research to applications in people and planetary health.</p><p><a 
href="http://pixelscientia.com/services/strategy-session/">Computer Vision Strategy Session</a> – Not sure how to advance your computer vision project? Get unstuck with a clear set of next steps. Schedule a 1 hour strategy session now to advance your project.</p><p><a href="https://pixelscientia.com/services/foundation-model-assessment/">Foundation Model Assessment</a> – Foundation models are popping up everywhere – do you need one for your proprietary image dataset? Get a clear perspective on whether you can benefit from a domain-specific foundation model.</p>]]>
      </content:encoded>
      <pubDate>Mon, 27 May 2024 06:00:00 -0400</pubDate>
      <author>Heather D. Couture</author>
      <enclosure url="https://2.gum.fm/op3.dev/e/pdcn.co/e/pdst.fm/e/dts.podtrac.com/redirect.mp3/media.transistor.fm/97eebf9e/12e2b5e3.mp3" length="44322068" type="audio/mpeg"/>
      <itunes:author>Heather D. Couture</itunes:author>
      <itunes:image href="https://img.transistor.fm/gZgEl8Gbisf40UXG1SmwY4k4qvvNL3TGjQXDrzLhIh8/rs:fill:0:0:1/w:1400/h:1400/q:60/mb:500000/aHR0cHM6Ly9pbWct/dXBsb2FkLXByb2R1/Y3Rpb24udHJhbnNp/c3Rvci5mbS8xZTc1/NzM1MWU2Y2Q3ZDZi/NWMxNWY4NTg3MDgz/ZWJiMy5qcGVn.jpg"/>
      <itunes:duration>1842</itunes:duration>
      <itunes:summary>
        <![CDATA[<p>Did you know that concrete is the second most-used material in the world after water? Although it has largely defined modern society, concrete has a hidden climate cost: it is responsible for 1.6 billion tons of carbon dioxide entering the atmosphere annually. For context, that’s more than the entire aviation industry! With these statistics in mind, today’s guest is on a mission to decarbonize the construction industry. As the CTO and co-founder of cleantech startup, Concrete.ai, Mathieu Bauchy is using his expertise in artificial intelligence and materials modeling to prescribe new concrete formulations that are less carbon-intensive and more economical. Today, Mathieu joins me to offer insight into Concrete.ai’s exciting technology, why it’s important for the planet, and how it can reduce concrete emissions by a third while also ensuring that concrete producers maximize margins and streamline their supply chains. To find out how this is possible without any changes to the raw materials, no modification of the production process, and no cost premium, be sure to tune in today!</p><p><br></p><p><strong>Key Points:</strong></p><ul><li>Insight into Mathieu’s research focus and how it led him to create Concrete.ai.</li><li>What Concrete.ai does and why it’s important for reducing CO2 emissions.</li><li>The role of machine learning, particularly generative AI, in this technology.</li><li>How Concrete.ai develops ML models that are reliably able to extrapolate.</li><li>Why estimating uncertainty is important and how Concrete.ai approaches it.</li><li>What goes into validating these models, including systematic testing in the field.</li><li>Reasons that the timing for Concrete.ai’s technology is critical.</li><li>Dollars saved and other metrics for measuring the impact of this technology.</li><li>Mathieu’s humanity-focused advice for other leaders of AI-powered startups.</li><li>How Concrete.ai’s impact will continue to expand and 
evolve.</li></ul><p><br></p><p><strong>Quotes:</strong></p><p>“Concrete is responsible for 8% of the total CO2 emissions in the world. To give you some context, that's about three times more emissions than the entire aviation industry.” — Mathieu Bauchy</p><p><br></p><p>“We think that it's the right time for the concrete industry to benefit from what AI can offer to avoid waste during the production of concrete. The idea is that, if we adopt these new technologies, then we can continue to improve our quality of life.” — Mathieu Bauchy</p><p><br></p><p>“It's not like we are changing the way concrete is made. It's still made in the same plant. It's still made using the same materials. We are just changing the recipe, and just that [can] save about a third of the emissions of concrete.” — Mathieu Bauchy</p><p><br></p><p>“AI also comes with its own carbon footprint and, to some extent, also contributes to climate change. We should think about how we use AI to solve climate change and not further contribute to it.” — Mathieu Bauchy</p><p><br></p><p><strong>Links:</strong></p><p><a href="https://www.concrete.ai/">Concrete.ai</a><br><a href="https://www.linkedin.com/company/concrete-ai/">Concrete.ai on LinkedIn</a></p><p><a href="https://samueli.ucla.edu/people/mathieu-bauchy/">Mathieu Bauchy</a></p><p><a href="https://www.linkedin.com/in/bauchy/">Mathieu Bauchy on LinkedIn</a></p><p><a href="https://www.youtube.com/@MBauchy">Mathieu Bauchy on YouTube</a></p><p><a href="https://twitter.com/Moutmat">Mathieu Bauchy on X</a></p><p><br><strong>Resources for Computer Vision Teams:</strong></p><p><a href="https://www.linkedin.com/in/hdcouture/">LinkedIn</a> – Connect with Heather.</p><p><a href="https://pixelscientia.com/newsletter/">Computer Vision Insights Newsletter</a> – A biweekly newsletter to help bring the latest machine learning and computer vision research to applications in people and planetary health.</p><p><a 
href="http://pixelscientia.com/services/strategy-session/">Computer Vision Strategy Session</a> – Not sure how to advance your computer vision project? Get unstuck with a clear set of next steps. Schedule a 1 hour strategy session now to advance your project.</p><p><a href="https://pixelscientia.com/services/foundation-model-assessment/">Foundation Model Assessment</a> – Foundation models are popping up everywhere – do you need one for your proprietary image dataset? Get a clear perspective on whether you can benefit from a domain-specific foundation model.</p>]]>
      </itunes:summary>
      <itunes:keywords>machine learning, generative ai, concrete</itunes:keywords>
      <itunes:explicit>No</itunes:explicit>
      <podcast:transcript url="https://share.transistor.fm/s/97eebf9e/transcript.txt" type="text/plain"/>
    </item>
    <item>
      <title>Decoding Pathology for Precision Medicine with Maximilian Alber from Aignostics</title>
      <itunes:episode>83</itunes:episode>
      <podcast:episode>83</podcast:episode>
      <itunes:title>Decoding Pathology for Precision Medicine with Maximilian Alber from Aignostics</itunes:title>
      <itunes:episodeType>full</itunes:episodeType>
      <guid isPermaLink="false">cc4f6185-f5b4-4071-938c-0dd6e94fdffa</guid>
      <link>https://pixelscientia.com/podcast/decoding-pathology-for-precision-medicine-with-maximilian-alber-from-aignostics/</link>
      <description>
        <![CDATA[<p>Today, I am joined by Maximilian Alber, Co-founder and CTO of Aignostics, to talk about pathology for precision medicine. You’ll learn about Aignostics’s mission, how they are impacting healthcare, and the transformative power of foundational models. Max explains how Aignostics is driven by the belief that machine learning and data science will help improve healthcare before expanding on the role of foundational models. He describes how they built their foundational model, what sets it apart from other models, and why diversity in their datasets is key. He also breaks down how foundational models have allowed them to develop other models more quickly and better navigate explainability with concepts that are challenging for machine learning. We wrap up with Max’s advice for leaders of other AI-powered startups and where he expects Aignostics will be in the next five years. Tune in now to learn all about foundational models and the innovative work being done at Aignostics!</p><p><br></p><p><strong>Key Points:</strong></p><ul><li>Insight into Max’s role at Aignostics and how the company is impacting healthcare.</li><li>How they use machine learning to set themselves apart from their competitors.</li><li>A rundown of their models and datasets.</li><li>The definition of a foundation model and how Aignostics built theirs.</li><li>How to use foundation models as a starting point for building machine learning applications.</li><li>What sets Aignostics’ foundation model for histopathology apart from other similar models.</li><li>How their foundation model enables them to develop other models more quickly.</li><li>Top lessons Max has learned from developing foundation models.</li><li>How they navigate explainability with concepts that are challenging for machine learning.</li><li>The positive impact that foundational models have had on explainability.</li><li>Recent advancements that Max is excited about as potential use cases for 
Aignostics.</li><li>Max’s advice to leaders of other AI-powered startups.</li><li>The impact of Aignostics and where he expects it will be in the next three to five years.</li></ul><p><br></p><p><strong>Quotes:</strong></p><p>“Our mission is to turn biomedical data into insights.” — Maximilian Alber</p><p><br></p><p>“Everything we do is driven by the belief that machine learning and data science will help us improve healthcare.” — Maximilian Alber</p><p><br></p><p>“A foundation model is a model that can be used as a starting point for building a machine learning application, with the promise that the foundation model already has a great understanding of the domain.” — Maximilian Alber</p><p><br></p><p>“We are in active discussions for licensing our foundation model to other companies in order to enable their development as well. [What’s] important here is that we develop our foundation model along regulatory requirements, which will allow it to be used in medical products.” — Maximilian Alber</p><p><br></p><p>“One needs to build a technology that either makes a difference in the long run, or one must be able to innovate at a very fast pace.” — Maximilian Alber</p><p><br></p><p><strong>Links:</strong></p><p><a href="https://www.linkedin.com/in/maximilian-alber-038092194/">Maximilian Alber on LinkedIn</a></p><p><a href="https://www.aignostics.com/">Aignostics</a></p><p><a href="https://www.linkedin.com/company/aignostics/">Aignostics on LinkedIn</a></p><p><br><strong>Resources for Computer Vision Teams:</strong></p><p><a href="https://www.linkedin.com/in/hdcouture/">LinkedIn</a> – Connect with Heather.</p><p><a href="https://pixelscientia.com/newsletter/">Computer Vision Insights Newsletter</a> – A biweekly newsletter to help bring the latest machine learning and computer vision research to applications in people and planetary health.</p><p><a href="http://pixelscientia.com/services/strategy-session/">Computer Vision Strategy Session</a> – Not sure how to advance 
your computer vision project? Get unstuck with a clear set of next steps. Schedule a 1 hour strategy session now to advance your project.</p><p><a href="https://pixelscientia.com/services/foundation-model-assessment/">Foundation Model Assessment</a> – Foundation models are popping up everywhere – do you need one for your proprietary image dataset? Get a clear perspective on whether you can benefit from a domain-specific foundation model.</p>]]>
      </description>
      <content:encoded>
        <![CDATA[<p>Today, I am joined by Maximilian Alber, Co-founder and CTO of Aignostics, to talk about pathology for precision medicine. You’ll learn about Aignostics’s mission, how they are impacting healthcare, and the transformative power of foundational models. Max explains how Aignostics is driven by the belief that machine learning and data science will help improve healthcare before expanding on the role of foundational models. He describes how they built their foundational model, what sets it apart from other models, and why diversity in their datasets is key. He also breaks down how foundational models have allowed them to develop other models more quickly and better navigate explainability with concepts that are challenging for machine learning. We wrap up with Max’s advice for leaders of other AI-powered startups and where he expects Aignostics will be in the next five years. Tune in now to learn all about foundational models and the innovative work being done at Aignostics!</p><p><br></p><p><strong>Key Points:</strong></p><ul><li>Insight into Max’s role at Aignostics and how the company is impacting healthcare.</li><li>How they use machine learning to set themselves apart from their competitors.</li><li>A rundown of their models and datasets.</li><li>The definition of a foundation model and how Aignostics built theirs.</li><li>How to use foundation models as a starting point for building machine learning applications.</li><li>What sets Aignostics’ foundation model for histopathology apart from other similar models.</li><li>How their foundation model enables them to develop other models more quickly.</li><li>Top lessons Max has learned from developing foundation models.</li><li>How they navigate explainability with concepts that are challenging for machine learning.</li><li>The positive impact that foundational models have had on explainability.</li><li>Recent advancements that Max is excited about as potential use cases for 
Aignostics.</li><li>Max’s advice to leaders of other AI-powered startups.</li><li>The impact of Aignostics and where he expects it will be in the next three to five years.</li></ul><p><br></p><p><strong>Quotes:</strong></p><p>“Our mission is to turn biomedical data into insights.” — Maximilian Alber</p><p><br></p><p>“Everything we do is driven by the belief that machine learning and data science will help us improve healthcare.” — Maximilian Alber</p><p><br></p><p>“A foundation model is a model that can be used as a starting point for building a machine learning application, with the promise that the foundation model already has a great understanding of the domain.” — Maximilian Alber</p><p><br></p><p>“We are in active discussions for licensing our foundation model to other companies in order to enable their development as well. [What’s] important here is that we develop our foundation model along regulatory requirements, which will allow it to be used in medical products.” — Maximilian Alber</p><p><br></p><p>“One needs to build a technology that either makes a difference in the long run, or one must be able to innovate at a very fast pace.” — Maximilian Alber</p><p><br></p><p><strong>Links:</strong></p><p><a href="https://www.linkedin.com/in/maximilian-alber-038092194/">Maximilian Alber on LinkedIn</a></p><p><a href="https://www.aignostics.com/">Aignostics</a></p><p><a href="https://www.linkedin.com/company/aignostics/">Aignostics on LinkedIn</a></p><p><br><strong>Resources for Computer Vision Teams:</strong></p><p><a href="https://www.linkedin.com/in/hdcouture/">LinkedIn</a> – Connect with Heather.</p><p><a href="https://pixelscientia.com/newsletter/">Computer Vision Insights Newsletter</a> – A biweekly newsletter to help bring the latest machine learning and computer vision research to applications in people and planetary health.</p><p><a href="http://pixelscientia.com/services/strategy-session/">Computer Vision Strategy Session</a> – Not sure how to advance 
your computer vision project? Get unstuck with a clear set of next steps. Schedule a 1 hour strategy session now to advance your project.</p><p><a href="https://pixelscientia.com/services/foundation-model-assessment/">Foundation Model Assessment</a> – Foundation models are popping up everywhere – do you need one for your proprietary image dataset? Get a clear perspective on whether you can benefit from a domain-specific foundation model.</p>]]>
      </content:encoded>
      <pubDate>Mon, 20 May 2024 06:00:00 -0400</pubDate>
      <author>Heather D. Couture</author>
      <enclosure url="https://2.gum.fm/op3.dev/e/pdcn.co/e/pdst.fm/e/dts.podtrac.com/redirect.mp3/media.transistor.fm/f5e890a2/17344c16.mp3" length="28216965" type="audio/mpeg"/>
      <itunes:author>Heather D. Couture</itunes:author>
      <itunes:image href="https://img.transistor.fm/hW4wj_pcisuH5YB2dY1wVreWht3FQCc_wk-joBnXXdM/rs:fill:0:0:1/w:1400/h:1400/q:60/mb:500000/aHR0cHM6Ly9pbWct/dXBsb2FkLXByb2R1/Y3Rpb24udHJhbnNp/c3Rvci5mbS85OGE5/NTFkZWE3YWE1OTNm/NjEzYjYyNzI3MjQx/YjYyOC5qcGVn.jpg"/>
      <itunes:duration>1175</itunes:duration>
      <itunes:summary>
        <![CDATA[<p>Today, I am joined by Maximilian Alber, Co-founder and CTO of Aignostics, to talk about pathology for precision medicine. You’ll learn about Aignostics’s mission, how they are impacting healthcare, and the transformative power of foundational models. Max explains how Aignostics is driven by the belief that machine learning and data science will help improve healthcare before expanding on the role of foundational models. He describes how they built their foundational model, what sets it apart from other models, and why diversity in their datasets is key. He also breaks down how foundational models have allowed them to develop other models more quickly and better navigate explainability with concepts that are challenging for machine learning. We wrap up with Max’s advice for leaders of other AI-powered startups and where he expects Aignostics will be in the next five years. Tune in now to learn all about foundational models and the innovative work being done at Aignostics!</p><p><br></p><p><strong>Key Points:</strong></p><ul><li>Insight into Max’s role at Aignostics and how the company is impacting healthcare.</li><li>How they use machine learning to set themselves apart from their competitors.</li><li>A rundown of their models and datasets.</li><li>The definition of a foundation model and how Aignostics built theirs.</li><li>How to use foundation models as a starting point for building machine learning applications.</li><li>What sets Aignostics’ foundation model for histopathology apart from other similar models.</li><li>How their foundation model enables them to develop other models more quickly.</li><li>Top lessons Max has learned from developing foundation models.</li><li>How they navigate explainability with concepts that are challenging for machine learning.</li><li>The positive impact that foundational models have had on explainability.</li><li>Recent advancements that Max is excited about as potential use cases for 
Aignostics.</li><li>Max’s advice to leaders of other AI-powered startups.</li><li>The impact of Aignostics and where he expects it will be in the next three to five years.</li></ul><p><br></p><p><strong>Quotes:</strong></p><p>“Our mission is to turn biomedical data into insights.” — Maximilian Alber</p><p><br></p><p>“Everything we do is driven by the belief that machine learning and data science will help us improve healthcare.” — Maximilian Alber</p><p><br></p><p>“A foundation model is a model that can be used as a starting point for building a machine learning application, with the promise that the foundation model already has a great understanding of the domain.” — Maximilian Alber</p><p><br></p><p>“We are in active discussions for licensing our foundation model to other companies in order to enable their development as well. [What’s] important here is that we develop our foundation model along regulatory requirements, which will allow it to be used in medical products.” — Maximilian Alber</p><p><br></p><p>“One needs to build a technology that either makes a difference in the long run, or one must be able to innovate at a very fast pace.” — Maximilian Alber</p><p><br></p><p><strong>Links:</strong></p><p><a href="https://www.linkedin.com/in/maximilian-alber-038092194/">Maximilian Alber on LinkedIn</a></p><p><a href="https://www.aignostics.com/">Aignostics</a></p><p><a href="https://www.linkedin.com/company/aignostics/">Aignostics on LinkedIn</a></p><p><br><strong>Resources for Computer Vision Teams:</strong></p><p><a href="https://www.linkedin.com/in/hdcouture/">LinkedIn</a> – Connect with Heather.</p><p><a href="https://pixelscientia.com/newsletter/">Computer Vision Insights Newsletter</a> – A biweekly newsletter to help bring the latest machine learning and computer vision research to applications in people and planetary health.</p><p><a href="http://pixelscientia.com/services/strategy-session/">Computer Vision Strategy Session</a> – Not sure how to advance 
your computer vision project? Get unstuck with a clear set of next steps. Schedule a 1 hour strategy session now to advance your project.</p><p><a href="https://pixelscientia.com/services/foundation-model-assessment/">Foundation Model Assessment</a> – Foundation models are popping up everywhere – do you need one for your proprietary image dataset? Get a clear perspective on whether you can benefit from a domain-specific foundation model.</p>]]>
      </itunes:summary>
      <itunes:keywords>pathology, computer vision, precision medicine</itunes:keywords>
      <itunes:explicit>No</itunes:explicit>
      <podcast:transcript url="https://share.transistor.fm/s/f5e890a2/transcript.txt" type="text/plain"/>
    </item>
    <item>
      <title>Subseasonal-to-Seasonal Weather Forecasting with Sam Levang from Salient Predictions</title>
      <itunes:episode>82</itunes:episode>
      <podcast:episode>82</podcast:episode>
      <itunes:title>Subseasonal-to-Seasonal Weather Forecasting with Sam Levang from Salient Predictions</itunes:title>
      <itunes:episodeType>full</itunes:episodeType>
      <guid isPermaLink="false">dc18c558-171b-43ad-b70b-6a11acfd9bd9</guid>
      <link>https://pixelscientia.com/podcast/subseasonal-to-seasonal-weather-forecasting-with-sam-levang-from-salient-predictions/</link>
      <description>
        <![CDATA[<p>Advanced weather forecasts are the new frontier in meteorology. Long-term forecasting has garnered significant attention due to its potential to provide valuable insights to various sectors of society and the economy. In today’s episode, Sam Levang, Chief Scientist at Salient, joins me to discuss Salient’s innovative approach to weather forecasting. Salient specializes in providing highly accurate subseasonal-to-seasonal weather forecasts ranging from 2 to 52 weeks in advance.</p><p>In our conversation, we discuss the ins and outs of the company’s innovative approach to weather forecasting. We delve into the hurdles of subseasonal-to-seasonal forecasting, how machine learning is replacing traditional weather modeling approaches, and the various inputs it uses. Discover the value of machine learning for post-processing of data, the type of data the company utilizes, and why it uses probabilistic models in its approach. Gain insights into how Salient is catering to the impacts of climate change in its weather predictions, the company’s approach to validation, how AI has made it all possible, and much more!</p><p><br></p><p><strong>Key Points:</strong></p><ul><li>Sam's background in science and the creation of Salient.</li><li>Hear how Salient is revolutionizing weather forecasting and why.</li><li>How Salient is utilizing machine learning in its forecasting models.</li><li>Examples of the data and models the company uses.</li><li>The challenges of working with weather data to build models.</li><li>Explore why Salient also uses probabilistic models in its approach.</li><li>Salient’s approach to validation and how it deals with data uncertainty.</li><li>Ways AI has made the company’s approach to forecasting possible. </li><li>He shares advice for leaders of other AI-powered startups.</li></ul><p><br></p><p><strong>Quotes:</strong></p><p>“Salient produces weather forecasts that extend further into the future than most people are used to seeing. 
We go up to a year in advance.” — Sam Levang</p><p><br></p><p>“ML (Machine Learning) models have proved to be actually a very effective replacement for the traditional approach to weather modeling.” — Sam Levang</p><p><br></p><p>“The only difference about making forecasts longer timescales of weeks and months ahead is that there are some differences in the particular parts of the climate system that provide the most predictability.” — Sam Levang</p><p><br></p><p>“While ML and AI are extremely powerful tools, they are still just tools and there's so much else that goes into building a really valuable product, or a service, or a company.” — Sam Levang</p><p><br></p><p><strong>Links:</strong></p><p><a href="https://www.linkedin.com/in/slevang/">Sam Levang on LinkedIn </a></p><p><a href="https://www.salientpredictions.com">Salient</a> </p><p><strong>Resources for Computer Vision Teams:</strong></p><p><a href="https://www.linkedin.com/in/hdcouture/">LinkedIn</a> – Connect with Heather.</p><p><a href="https://pixelscientia.com/newsletter/">Computer Vision Insights Newsletter</a> – A biweekly newsletter to help bring the latest machine learning and computer vision research to applications in people and planetary health.</p><p><a href="http://pixelscientia.com/services/strategy-session/">Computer Vision Strategy Session</a> – Not sure how to advance your computer vision project? Get unstuck with a clear set of next steps. Schedule a 1 hour strategy session now to advance your project.</p><p><a href="https://pixelscientia.com/services/foundation-model-assessment/">Foundation Model Assessment</a> – Foundation models are popping up everywhere – do you need one for your proprietary image dataset? Get a clear perspective on whether you can benefit from a domain-specific foundation model.</p>]]>
      </description>
      <content:encoded>
        <![CDATA[<p>Advanced weather forecasts are the new frontier in meteorology. Long-term forecasting has garnered significant attention due to its potential to provide valuable insights to various sectors of society and the economy. In today’s episode, Sam Levang, Chief Scientist at Salient, joins me to discuss Salient’s innovative approach to weather forecasting. Salient specializes in providing highly accurate subseasonal-to-seasonal weather forecasts ranging from 2 to 52 weeks in advance.</p><p>In our conversation, we discuss the ins and outs of the company’s innovative approach to weather forecasting. We delve into the hurdles of subseasonal-to-seasonal forecasting, how machine learning is replacing traditional weather modeling approaches, and the various inputs it uses. Discover the value of machine learning for post-processing of data, the type of data the company utilizes, and why it uses probabilistic models in its approach. Gain insights into how Salient is catering to the impacts of climate change in its weather predictions, the company’s approach to validation, how AI has made it all possible, and much more!</p><p><br></p><p><strong>Key Points:</strong></p><ul><li>Sam's background in science and the creation of Salient.</li><li>Hear how Salient is revolutionizing weather forecasting and why.</li><li>How Salient is utilizing machine learning in its forecasting models.</li><li>Examples of the data and models the company uses.</li><li>The challenges of working with weather data to build models.</li><li>Explore why Salient also uses probabilistic models in its approach.</li><li>Salient’s approach to validation and how it deals with data uncertainty.</li><li>Ways AI has made the company’s approach to forecasting possible. </li><li>He shares advice for leaders of other AI-powered startups.</li></ul><p><br></p><p><strong>Quotes:</strong></p><p>“Salient produces weather forecasts that extend further into the future than most people are used to seeing. 
We go up to a year in advance.” — Sam Levang</p><p><br></p><p>“ML (Machine Learning) models have proved to be actually a very effective replacement for the traditional approach to weather modeling.” — Sam Levang</p><p><br></p><p>“The only difference about making forecasts longer timescales of weeks and months ahead is that there are some differences in the particular parts of the climate system that provide the most predictability.” — Sam Levang</p><p><br></p><p>“While ML and AI are extremely powerful tools, they are still just tools and there's so much else that goes into building a really valuable product, or a service, or a company.” — Sam Levang</p><p><br></p><p><strong>Links:</strong></p><p><a href="https://www.linkedin.com/in/slevang/">Sam Levang on LinkedIn </a></p><p><a href="https://www.salientpredictions.com">Salient</a> </p><p><strong>Resources for Computer Vision Teams:</strong></p><p><a href="https://www.linkedin.com/in/hdcouture/">LinkedIn</a> – Connect with Heather.</p><p><a href="https://pixelscientia.com/newsletter/">Computer Vision Insights Newsletter</a> – A biweekly newsletter to help bring the latest machine learning and computer vision research to applications in people and planetary health.</p><p><a href="http://pixelscientia.com/services/strategy-session/">Computer Vision Strategy Session</a> – Not sure how to advance your computer vision project? Get unstuck with a clear set of next steps. Schedule a 1 hour strategy session now to advance your project.</p><p><a href="https://pixelscientia.com/services/foundation-model-assessment/">Foundation Model Assessment</a> – Foundation models are popping up everywhere – do you need one for your proprietary image dataset? Get a clear perspective on whether you can benefit from a domain-specific foundation model.</p>]]>
      </content:encoded>
      <pubDate>Mon, 13 May 2024 06:00:00 -0400</pubDate>
      <author>Heather D. Couture</author>
      <enclosure url="https://2.gum.fm/op3.dev/e/pdcn.co/e/pdst.fm/e/dts.podtrac.com/redirect.mp3/media.transistor.fm/ecb6c103/7eb96ab1.mp3" length="24347419" type="audio/mpeg"/>
      <itunes:author>Heather D. Couture</itunes:author>
      <itunes:image href="https://img.transistor.fm/cZ7AapOvhl7cSzJDN7rDby1tSOdUieQ6KyLyd6d95Wk/rs:fill:0:0:1/w:1400/h:1400/q:60/mb:500000/aHR0cHM6Ly9pbWct/dXBsb2FkLXByb2R1/Y3Rpb24udHJhbnNp/c3Rvci5mbS80YjM5/NzQ4N2E4YjFlMTU5/ZWU0ZjgzNDJlNjIx/YWFhNy5qcGVn.jpg"/>
      <itunes:duration>1011</itunes:duration>
      <itunes:summary>
        <![CDATA[<p>Advanced weather forecasts are the new frontier in meteorology. Long-term forecasting has garnered significant attention due to its potential to provide valuable insights to various sectors of society and the economy. In today’s episode, Sam Levang, Chief Scientist at Salient, joins me to discuss Salient’s innovative approach to weather forecasting. Salient specializes in providing highly accurate subseasonal-to-seasonal weather forecasts ranging from 2 to 52 weeks in advance.</p><p>In our conversation, we discuss the ins and outs of the company’s innovative approach to weather forecasting. We delve into the hurdles of subseasonal-to-seasonal forecasting, how machine learning is replacing traditional weather modeling approaches, and the various inputs it uses. Discover the value of machine learning for post-processing of data, the type of data the company utilizes, and why it uses probabilistic models in its approach. Gain insights into how Salient is catering to the impacts of climate change in its weather predictions, the company’s approach to validation, how AI has made it all possible, and much more!</p><p><br></p><p><strong>Key Points:</strong></p><ul><li>Sam's background in science and the creation of Salient.</li><li>Hear how Salient is revolutionizing weather forecasting and why.</li><li>How Salient is utilizing machine learning in its forecasting models.</li><li>Examples of the data and models the company uses.</li><li>The challenges of working with weather data to build models.</li><li>Explore why Salient also uses probabilistic models in its approach.</li><li>Salient’s approach to validation and how it deals with data uncertainty.</li><li>Ways AI has made the company’s approach to forecasting possible. </li><li>He shares advice for leaders of other AI-powered startups.</li></ul><p><br></p><p><strong>Quotes:</strong></p><p>“Salient produces weather forecasts that extend further into the future than most people are used to seeing. 
We go up to a year in advance.” — Sam Levang</p><p><br></p><p>“ML (Machine Learning) models have proved to be actually a very effective replacement for the traditional approach to weather modeling.” — Sam Levang</p><p><br></p><p>“The only difference about making forecasts longer timescales of weeks and months ahead is that there are some differences in the particular parts of the climate system that provide the most predictability.” — Sam Levang</p><p><br></p><p>“While ML and AI are extremely powerful tools, they are still just tools and there's so much else that goes into building a really valuable product, or a service, or a company.” — Sam Levang</p><p><br></p><p><strong>Links:</strong></p><p><a href="https://www.linkedin.com/in/slevang/">Sam Levang on LinkedIn </a></p><p><a href="https://www.salientpredictions.com">Salient</a> </p><p><strong>Resources for Computer Vision Teams:</strong></p><p><a href="https://www.linkedin.com/in/hdcouture/">LinkedIn</a> – Connect with Heather.</p><p><a href="https://pixelscientia.com/newsletter/">Computer Vision Insights Newsletter</a> – A biweekly newsletter to help bring the latest machine learning and computer vision research to applications in people and planetary health.</p><p><a href="http://pixelscientia.com/services/strategy-session/">Computer Vision Strategy Session</a> – Not sure how to advance your computer vision project? Get unstuck with a clear set of next steps. Schedule a 1 hour strategy session now to advance your project.</p><p><a href="https://pixelscientia.com/services/foundation-model-assessment/">Foundation Model Assessment</a> – Foundation models are popping up everywhere – do you need one for your proprietary image dataset? Get a clear perspective on whether you can benefit from a domain-specific foundation model.</p>]]>
      </itunes:summary>
      <itunes:keywords>machine learning, weather, subseasonal forecasting</itunes:keywords>
      <itunes:explicit>No</itunes:explicit>
      <podcast:transcript url="https://share.transistor.fm/s/ecb6c103/transcript.txt" type="text/plain"/>
    </item>
    <item>
      <title>Virtual Tissue Staining with Yair Rivenson from PictorLabs</title>
      <itunes:episode>81</itunes:episode>
      <podcast:episode>81</podcast:episode>
      <itunes:title>Virtual Tissue Staining with Yair Rivenson from PictorLabs</itunes:title>
      <itunes:episodeType>full</itunes:episodeType>
      <guid isPermaLink="false">fef3eebc-7eea-4fe7-9f5b-9cb4f1e67a38</guid>
      <link>https://pixelscientia.com/podcast/virtual-tissue-staining-with-yair-rivenson-from-pictorlabs/</link>
      <description>
        <![CDATA[<p>Welcome to today’s episode of Impact AI, where we dive into the groundbreaking world of virtual tissue staining with Yair Rivenson, the co-founder and CEO of PictorLabs, a digital pathology company advancing AI-powered virtual staining technology to revolutionize histopathology and accelerate clinical research to improve patient outcomes. You’ll find out how machine learning is used to translate unstained tissue autofluorescence into diagnostic-ready images, gain insight into overcoming AI hallucinations and the rigorous validation processes behind virtual staining models, and discover how PictorLabs navigates challenges like large files and bandwidth dependency while seamlessly integrating technology into clinical workflows. Yair also provides invaluable advice for AI-powered startup leaders, emphasizing the importance of automation and data quality. To gain deeper insights into the transformative potential of virtual tissue staining, tune in today!</p><p><br></p><p><strong>Key Points:</strong></p><ul><li>The origin story of PictorLabs and the research that informed it.</li><li>Why Pictor’s work is so important for patients and the healthcare system.</li><li>What Yair means when he says machine learning is the “engine” for virtual staining.</li><li>How Pictor mitigates the challenge of AI hallucinations.</li><li>Insight into what goes into validating virtual staining models.</li><li>Large files, bandwidth dependency, and other challenges that Pictor faces.</li><li>A look at how this technology fits smoothly into the clinical workflow.</li><li>Collaborating with economic partners while staying focused on business objectives.</li><li>Yair’s product-focused advice for leaders of AI-powered startups</li><li>What the next three to five years looks like for PictorLabs.</li></ul><p><br></p><p><strong>Quotes:</strong></p><p><br></p><p>“The most important factor for the healthcare system, for the patient is the fact that you can get all the results, all 
the workup, and all the different stains from a single tissue section very, very fast.” — Yair Rivenson</p><p><br></p><p>“Machine learning is the engine behind virtual staining. In a sense, that’s what takes those images from the autofluorescence of the unstained tissue section and converts [them] into a stain that pathologists can use for their diagnostics.” — Yair Rivenson</p><p><br></p><p>“At the end of the day, the network is as good as the data that it learns from.” — Yair Rivenson</p><p><br></p><p>“The more you automate, the better off you’ll be in the long run.” — Yair Rivenson</p><p><br></p><p><strong>Links:</strong></p><p><a href="https://scholar.google.com/citations?user=gaktRVgAAAAJ&amp;hl=en">Yair Rivenson</a></p><p><a href="https://pictorlabs.ai/">PictorLabs</a></p><p><a href="https://www.linkedin.com/company/pictorlabs/">PictorLabs on LinkedIn</a></p><p><a href="https://www.nature.com/articles/s41551-019-0362-y">‘Virtual histological staining of unlabelled tissue-autofluorescence images via deep learning’</a></p><p><a href="https://websitev0.s3.us-west-1.amazonaws.com/Pictor+and+Univ+Maryland+USCAP+poster+-+March+2024+(3).pdf">‘Assessment of AI Computational H&amp;E Staining Versus Chemical H&amp;E Staining For Primary Diagnosis in Lymphomas’</a></p><p><br><strong>Resources for Computer Vision Teams:</strong></p><p><a href="https://www.linkedin.com/in/hdcouture/">LinkedIn</a> – Connect with Heather.</p><p><a href="https://pixelscientia.com/newsletter/">Computer Vision Insights Newsletter</a> – A biweekly newsletter to help bring the latest machine learning and computer vision research to applications in people and planetary health.</p><p><a href="http://pixelscientia.com/services/strategy-session/">Computer Vision Strategy Session</a> – Not sure how to advance your computer vision project? Get unstuck with a clear set of next steps. 
Schedule a 1 hour strategy session now to advance your project.</p><p><a href="https://pixelscientia.com/services/foundation-model-assessment/">Foundation Model Assessment</a> – Foundation models are popping up everywhere – do you need one for your proprietary image dataset? Get a clear perspective on whether you can benefit from a domain-specific foundation model.</p>]]>
      </description>
      <content:encoded>
        <![CDATA[<p>Welcome to today’s episode of Impact AI, where we dive into the groundbreaking world of virtual tissue staining with Yair Rivenson, the co-founder and CEO of PictorLabs, a digital pathology company advancing AI-powered virtual staining technology to revolutionize histopathology and accelerate clinical research to improve patient outcomes. You’ll find out how machine learning is used to translate unstained tissue autofluorescence into diagnostic-ready images, gain insight into overcoming AI hallucinations and the rigorous validation processes behind virtual staining models, and discover how PictorLabs navigates challenges like large files and bandwidth dependency while seamlessly integrating technology into clinical workflows. Yair also provides invaluable advice for AI-powered startup leaders, emphasizing the importance of automation and data quality. To gain deeper insights into the transformative potential of virtual tissue staining, tune in today!</p><p><br></p><p><strong>Key Points:</strong></p><ul><li>The origin story of PictorLabs and the research that informed it.</li><li>Why Pictor’s work is so important for patients and the healthcare system.</li><li>What Yair means when he says machine learning is the “engine” for virtual staining.</li><li>How Pictor mitigates the challenge of AI hallucinations.</li><li>Insight into what goes into validating virtual staining models.</li><li>Large files, bandwidth dependency, and other challenges that Pictor faces.</li><li>A look at how this technology fits smoothly into the clinical workflow.</li><li>Collaborating with economic partners while staying focused on business objectives.</li><li>Yair’s product-focused advice for leaders of AI-powered startups</li><li>What the next three to five years looks like for PictorLabs.</li></ul><p><br></p><p><strong>Quotes:</strong></p><p><br></p><p>“The most important factor for the healthcare system, for the patient is the fact that you can get all the results, all 
the workup, and all the different stains from a single tissue section very, very fast.” — Yair Rivenson</p><p><br></p><p>“Machine learning is the engine behind virtual staining. In a sense, that’s what takes those images from the autofluorescence of the unstained tissue section and converts [them] into a stain that pathologists can use for their diagnostics.” — Yair Rivenson</p><p><br></p><p>“At the end of the day, the network is as good as the data that it learns from.” — Yair Rivenson</p><p><br></p><p>“The more you automate, the better off you’ll be in the long run.” — Yair Rivenson</p><p><br></p><p><strong>Links:</strong></p><p><a href="https://scholar.google.com/citations?user=gaktRVgAAAAJ&amp;hl=en">Yair Rivenson</a></p><p><a href="https://pictorlabs.ai/">PictorLabs</a></p><p><a href="https://www.linkedin.com/company/pictorlabs/">PictorLabs on LinkedIn</a></p><p><a href="https://www.nature.com/articles/s41551-019-0362-y">‘Virtual histological staining of unlabelled tissue-autofluorescence images via deep learning’</a></p><p><a href="https://websitev0.s3.us-west-1.amazonaws.com/Pictor+and+Univ+Maryland+USCAP+poster+-+March+2024+(3).pdf">‘Assessment of AI Computational H&amp;E Staining Versus Chemical H&amp;E Staining For Primary Diagnosis in Lymphomas’</a></p><p><br><strong>Resources for Computer Vision Teams:</strong></p><p><a href="https://www.linkedin.com/in/hdcouture/">LinkedIn</a> – Connect with Heather.</p><p><a href="https://pixelscientia.com/newsletter/">Computer Vision Insights Newsletter</a> – A biweekly newsletter to help bring the latest machine learning and computer vision research to applications in people and planetary health.</p><p><a href="http://pixelscientia.com/services/strategy-session/">Computer Vision Strategy Session</a> – Not sure how to advance your computer vision project? Get unstuck with a clear set of next steps. 
Schedule a 1 hour strategy session now to advance your project.</p><p><a href="https://pixelscientia.com/services/foundation-model-assessment/">Foundation Model Assessment</a> – Foundation models are popping up everywhere – do you need one for your proprietary image dataset? Get a clear perspective on whether you can benefit from a domain-specific foundation model.</p>]]>
      </content:encoded>
      <pubDate>Mon, 06 May 2024 06:00:00 -0400</pubDate>
      <author>Heather D. Couture</author>
      <enclosure url="https://2.gum.fm/op3.dev/e/pdcn.co/e/pdst.fm/e/dts.podtrac.com/redirect.mp3/media.transistor.fm/0d65c925/71e300a2.mp3" length="32888087" type="audio/mpeg"/>
      <itunes:author>Heather D. Couture</itunes:author>
      <itunes:image href="https://img.transistor.fm/uVP1lMPW1f_DFw5XiWHlb3Y9GfXmqko1gsdOFBY_Ubc/rs:fill:0:0:1/w:1400/h:1400/q:60/mb:500000/aHR0cHM6Ly9pbWct/dXBsb2FkLXByb2R1/Y3Rpb24udHJhbnNp/c3Rvci5mbS80YThi/ZjAyOTRlNDRhOWMy/NGQ5YTU0YmM3MjYx/OTA4Ny5qcGVn.jpg"/>
      <itunes:duration>2051</itunes:duration>
      <itunes:summary>
        <![CDATA[<p>Welcome to today’s episode of Impact AI, where we dive into the groundbreaking world of virtual tissue staining with Yair Rivenson, the co-founder and CEO of PictorLabs, a digital pathology company advancing AI-powered virtual staining technology to revolutionize histopathology and accelerate clinical research to improve patient outcomes. You’ll find out how machine learning is used to translate unstained tissue autofluorescence into diagnostic-ready images, gain insight into overcoming AI hallucinations and the rigorous validation processes behind virtual staining models, and discover how PictorLabs navigates challenges like large files and bandwidth dependency while seamlessly integrating technology into clinical workflows. Yair also provides invaluable advice for AI-powered startup leaders, emphasizing the importance of automation and data quality. To gain deeper insights into the transformative potential of virtual tissue staining, tune in today!</p><p><br></p><p><strong>Key Points:</strong></p><ul><li>The origin story of PictorLabs and the research that informed it.</li><li>Why Pictor’s work is so important for patients and the healthcare system.</li><li>What Yair means when he says machine learning is the “engine” for virtual staining.</li><li>How Pictor mitigates the challenge of AI hallucinations.</li><li>Insight into what goes into validating virtual staining models.</li><li>Large files, bandwidth dependency, and other challenges that Pictor faces.</li><li>A look at how this technology fits smoothly into the clinical workflow.</li><li>Collaborating with economic partners while staying focused on business objectives.</li><li>Yair’s product-focused advice for leaders of AI-powered startups</li><li>What the next three to five years looks like for PictorLabs.</li></ul><p><br></p><p><strong>Quotes:</strong></p><p><br></p><p>“The most important factor for the healthcare system, for the patient is the fact that you can get all the results, all 
the workup, and all the different stains from a single tissue section very, very fast.” — Yair Rivenson</p><p><br></p><p>“Machine learning is the engine behind virtual staining. In a sense, that’s what takes those images from the autofluorescence of the unstained tissue section and converts [them] into a stain that pathologists can use for their diagnostics.” — Yair Rivenson</p><p><br></p><p>“At the end of the day, the network is as good as the data that it learns from.” — Yair Rivenson</p><p><br></p><p>“The more you automate, the better off you’ll be in the long run.” — Yair Rivenson</p><p><br></p><p><strong>Links:</strong></p><p><a href="https://scholar.google.com/citations?user=gaktRVgAAAAJ&amp;hl=en">Yair Rivenson</a></p><p><a href="https://pictorlabs.ai/">PictorLabs</a></p><p><a href="https://www.linkedin.com/company/pictorlabs/">PictorLabs on LinkedIn</a></p><p><a href="https://www.nature.com/articles/s41551-019-0362-y">‘Virtual histological staining of unlabelled tissue-autofluorescence images via deep learning’</a></p><p><a href="https://websitev0.s3.us-west-1.amazonaws.com/Pictor+and+Univ+Maryland+USCAP+poster+-+March+2024+(3).pdf">‘Assessment of AI Computational H&amp;E Staining Versus Chemical H&amp;E Staining For Primary Diagnosis in Lymphomas’</a></p><p><br><strong>Resources for Computer Vision Teams:</strong></p><p><a href="https://www.linkedin.com/in/hdcouture/">LinkedIn</a> – Connect with Heather.</p><p><a href="https://pixelscientia.com/newsletter/">Computer Vision Insights Newsletter</a> – A biweekly newsletter to help bring the latest machine learning and computer vision research to applications in people and planetary health.</p><p><a href="http://pixelscientia.com/services/strategy-session/">Computer Vision Strategy Session</a> – Not sure how to advance your computer vision project? Get unstuck with a clear set of next steps. 
Schedule a 1 hour strategy session now to advance your project.</p><p><a href="https://pixelscientia.com/services/foundation-model-assessment/">Foundation Model Assessment</a> – Foundation models are popping up everywhere – do you need one for your proprietary image dataset? Get a clear perspective on whether you can benefit from a domain-specific foundation model.</p>]]>
      </itunes:summary>
      <itunes:keywords>pathology, computer vision, machine learning, virtual staining</itunes:keywords>
      <itunes:explicit>No</itunes:explicit>
      <podcast:transcript url="https://share.transistor.fm/s/0d65c925/transcript.txt" type="text/plain"/>
    </item>
    <item>
      <title>Improving Recycling Efficiency with Nikola Sivacki from Greyparrot</title>
      <itunes:episode>80</itunes:episode>
      <podcast:episode>80</podcast:episode>
      <itunes:title>Improving Recycling Efficiency with Nikola Sivacki from Greyparrot</itunes:title>
      <itunes:episodeType>full</itunes:episodeType>
      <guid isPermaLink="false">3d0c9454-7b88-4ee2-bc58-0eb9efc82592</guid>
      <link>https://pixelscientia.com/podcast/improving-recycling-efficiency-with-nikola-sivacki-from-greyparrot/</link>
      <description>
        <![CDATA[<p>One of the most powerful impacts machine learning can make is helping to solve environmental challenges all around the world. Today on Impact AI, I am joined by the founder of Greyparrot, Nikola Sivacki to discuss how his company uses machine learning to improve recycling efficiency. Learn all about Nikola’s background, what Greyparrot does, their services, the importance of their work, the role machine learning plays in it, how they gather and annotate data, the challenges they face, how they develop new models, and so much more. Tune in to hear the newest AI innovations Nikola is most excited about before hearing his goals for Greyparrot in the near future. Lastly, get some valuable advice for running AI-powered startups.</p><p><br></p><p><strong>Key Points:</strong></p><ul><li>Welcoming Nikola Sivacki to the show. </li><li>Nikola shares a bit about his background and how it led him to create Greyparrot. </li><li>What Greyparrot does, what services they offer, and why it is so important. </li><li>The role machine learning plays in this technology. </li><li>How they go about gathering data and annotating it for their purposes. </li><li>What they are trying to predict with the data they are gathering. </li><li>Challenges they encounter in training machine learning models and how to overcome them.</li><li>A breakdown of how his team plans and develops a new machine learning model or feature. </li><li>Nikola shares how Greyparrot measures the impact of its technology. </li><li>The two groups of machine learning developments Nikola is most excited about. </li><li>Nikola shares some advice for other leaders of AI-powered startups. </li><li>Where he sees the impact of Greyparrot in three to five years. 
</li></ul><p><br></p><p><strong>Quotes:</strong></p><p>“Greyparrot basically monitors the flow of waste materials, recyclable materials in material recovery facilities, and offers compositional analysis of these materials.” — Nikola Sivacki</p><p><br></p><p>“It's very helpful, if thinking of a new product, to start with a data set that is really tailored to answering the main uncertain question that is posed there.” — Nikola Sivacki</p><p><br></p><p>“Start thinking about data from the start. I think that it’s very important to understand the data in detail.” — Nikola Sivacki</p><p>“Our goal is to improve, of course, recycling rates globally so that we can reduce reliance on virgin materials.” — Nikola Sivacki</p><p><br></p><p><strong>Links:</strong></p><p><a href="https://www.linkedin.com/in/nikolasivacki/">Nikola Sivacki on LinkedIn</a></p><p><a href="https://twitter.com/nsivacki">Nikola Sivacki on X</a></p><p><a href="https://www.greyparrot.ai/">Greyparrot</a></p><p><br><strong>Resources for Computer Vision Teams:</strong></p><p><a href="https://www.linkedin.com/in/hdcouture/">LinkedIn</a> – Connect with Heather.</p><p><a href="https://pixelscientia.com/newsletter/">Computer Vision Insights Newsletter</a> – A biweekly newsletter to help bring the latest machine learning and computer vision research to applications in people and planetary health.</p><p><a href="http://pixelscientia.com/services/strategy-session/">Computer Vision Strategy Session</a> – Not sure how to advance your computer vision project? Get unstuck with a clear set of next steps. Schedule a 1 hour strategy session now to advance your project.</p><p><a href="https://pixelscientia.com/services/foundation-model-assessment/">Foundation Model Assessment</a> – Foundation models are popping up everywhere – do you need one for your proprietary image dataset? Get a clear perspective on whether you can benefit from a domain-specific foundation model.</p>]]>
      </description>
      <content:encoded>
        <![CDATA[<p>One of the most powerful impacts machine learning can make is helping to solve environmental challenges all around the world. Today on Impact AI, I am joined by the founder of Greyparrot, Nikola Sivacki to discuss how his company uses machine learning to improve recycling efficiency. Learn all about Nikola’s background, what Greyparrot does, their services, the importance of their work, the role machine learning plays in it, how they gather and annotate data, the challenges they face, how they develop new models, and so much more. Tune in to hear the newest AI innovations Nikola is most excited about before hearing his goals for Greyparrot in the near future. Lastly, get some valuable advice for running AI-powered startups.</p><p><br></p><p><strong>Key Points:</strong></p><ul><li>Welcoming Nikola Sivacki to the show. </li><li>Nikola shares a bit about his background and how it led him to create Greyparrot. </li><li>What Greyparrot does, what services they offer, and why it is so important. </li><li>The role machine learning plays in this technology. </li><li>How they go about gathering data and annotating it for their purposes. </li><li>What they are trying to predict with the data they are gathering. </li><li>Challenges they encounter in training machine learning models and how to overcome them.</li><li>A breakdown of how his team plans and develops a new machine learning model or feature. </li><li>Nikola shares how Greyparrot measures the impact of its technology. </li><li>The two groups of machine learning developments Nikola is most excited about. </li><li>Nikola shares some advice for other leaders of AI-powered startups. </li><li>Where he sees the impact of Greyparrot in three to five years. 
</li></ul><p><br></p><p><strong>Quotes:</strong></p><p>“Greyparrot basically monitors the flow of waste materials, recyclable materials in material recovery facilities, and offers compositional analysis of these materials.” — Nikola Sivacki</p><p><br></p><p>“It's very helpful, if thinking of a new product, to start with a data set that is really tailored to answering the main uncertain question that is posed there.” — Nikola Sivacki</p><p><br></p><p>“Start thinking about data from the start. I think that it’s very important to understand the data in detail.” — Nikola Sivacki</p><p>“Our goal is to improve, of course, recycling rates globally so that we can reduce reliance on virgin materials.” — Nikola Sivacki</p><p><br></p><p><strong>Links:</strong></p><p><a href="https://www.linkedin.com/in/nikolasivacki/">Nikola Sivacki on LinkedIn</a></p><p><a href="https://twitter.com/nsivacki">Nikola Sivacki on X</a></p><p><a href="https://www.greyparrot.ai/">Greyparrot</a></p><p><br><strong>Resources for Computer Vision Teams:</strong></p><p><a href="https://www.linkedin.com/in/hdcouture/">LinkedIn</a> – Connect with Heather.</p><p><a href="https://pixelscientia.com/newsletter/">Computer Vision Insights Newsletter</a> – A biweekly newsletter to help bring the latest machine learning and computer vision research to applications in people and planetary health.</p><p><a href="http://pixelscientia.com/services/strategy-session/">Computer Vision Strategy Session</a> – Not sure how to advance your computer vision project? Get unstuck with a clear set of next steps. Schedule a 1 hour strategy session now to advance your project.</p><p><a href="https://pixelscientia.com/services/foundation-model-assessment/">Foundation Model Assessment</a> – Foundation models are popping up everywhere – do you need one for your proprietary image dataset? Get a clear perspective on whether you can benefit from a domain-specific foundation model.</p>]]>
      </content:encoded>
      <pubDate>Mon, 29 Apr 2024 06:00:00 -0400</pubDate>
      <author>Heather D. Couture</author>
      <enclosure url="https://2.gum.fm/op3.dev/e/pdcn.co/e/pdst.fm/e/dts.podtrac.com/redirect.mp3/media.transistor.fm/b0f0563a/be378c2a.mp3" length="19770549" type="audio/mpeg"/>
      <itunes:author>Heather D. Couture</itunes:author>
      <itunes:image href="https://img.transistor.fm/lBB3AJnePR0z_8xux_JLyAp07cy91hvB1I_SYDEN3DA/rs:fill:0:0:1/w:1400/h:1400/q:60/mb:500000/aHR0cHM6Ly9pbWct/dXBsb2FkLXByb2R1/Y3Rpb24udHJhbnNp/c3Rvci5mbS9lM2Uz/ZWEzYzNmZjI4NmNh/MmEzMGE1YWUwNTJj/MmFmOS5qcGVn.jpg"/>
      <itunes:duration>1231</itunes:duration>
      <itunes:summary>
        <![CDATA[<p>One of the most powerful impacts machine learning can make is helping to solve environmental challenges all around the world. Today on Impact AI, I am joined by the founder of Greyparrot, Nikola Sivacki to discuss how his company uses machine learning to improve recycling efficiency. Learn all about Nikola’s background, what Greyparrot does, their services, the importance of their work, the role machine learning plays in it, how they gather and annotate data, the challenges they face, how they develop new models, and so much more. Tune in to hear the newest AI innovations Nikola is most excited about before hearing his goals for Greyparrot in the near future. Lastly, get some valuable advice for running AI-powered startups.</p><p><br></p><p><strong>Key Points:</strong></p><ul><li>Welcoming Nikola Sivacki to the show. </li><li>Nikola shares a bit about his background and how it led him to create Greyparrot. </li><li>What Greyparrot does, what services they offer, and why it is so important. </li><li>The role machine learning plays in this technology. </li><li>How they go about gathering data and annotating it for their purposes. </li><li>What they are trying to predict with the data they are gathering. </li><li>Challenges they encounter in training machine learning models and how to overcome them.</li><li>A breakdown of how his team plans and develops a new machine learning model or feature. </li><li>Nikola shares how Greyparrot measures the impact of its technology. </li><li>The two groups of machine learning developments Nikola is most excited about. </li><li>Nikola shares some advice for other leaders of AI-powered startups. </li><li>Where he sees the impact of Greyparrot in three to five years. 
</li></ul><p><br></p><p><strong>Quotes:</strong></p><p>“Greyparrot basically monitors the flow of waste materials, recyclable materials in material recovery facilities, and offers compositional analysis of these materials.” — Nikola Sivacki</p><p><br></p><p>“It's very helpful, if thinking of a new product, to start with a data set that is really tailored to answering the main uncertain question that is posed there.” — Nikola Sivacki</p><p><br></p><p>“Start thinking about data from the start. I think that it’s very important to understand the data in detail.” — Nikola Sivacki</p><p>“Our goal is to improve, of course, recycling rates globally so that we can reduce reliance on virgin materials.” — Nikola Sivacki</p><p><br></p><p><strong>Links:</strong></p><p><a href="https://www.linkedin.com/in/nikolasivacki/">Nikola Sivacki on LinkedIn</a></p><p><a href="https://twitter.com/nsivacki">Nikola Sivacki on X</a></p><p><a href="https://www.greyparrot.ai/">Greyparrot</a></p><p><br><strong>Resources for Computer Vision Teams:</strong></p><p><a href="https://www.linkedin.com/in/hdcouture/">LinkedIn</a> – Connect with Heather.</p><p><a href="https://pixelscientia.com/newsletter/">Computer Vision Insights Newsletter</a> – A biweekly newsletter to help bring the latest machine learning and computer vision research to applications in people and planetary health.</p><p><a href="http://pixelscientia.com/services/strategy-session/">Computer Vision Strategy Session</a> – Not sure how to advance your computer vision project? Get unstuck with a clear set of next steps. Schedule a 1 hour strategy session now to advance your project.</p><p><a href="https://pixelscientia.com/services/foundation-model-assessment/">Foundation Model Assessment</a> – Foundation models are popping up everywhere – do you need one for your proprietary image dataset? Get a clear perspective on whether you can benefit from a domain-specific foundation model.</p>]]>
      </itunes:summary>
      <itunes:keywords>computer vision, machine learning, recycling</itunes:keywords>
      <itunes:explicit>No</itunes:explicit>
      <podcast:transcript url="https://share.transistor.fm/s/b0f0563a/transcript.txt" type="text/plain"/>
    </item>
    <item>
      <title>Discovering the Microbiome with Leo Grady from Jona</title>
      <itunes:episode>79</itunes:episode>
      <podcast:episode>79</podcast:episode>
      <itunes:title>Discovering the Microbiome with Leo Grady from Jona</itunes:title>
      <itunes:episodeType>full</itunes:episodeType>
      <guid isPermaLink="false">b004cdac-facb-445b-9c2a-107f3ecda989</guid>
      <link>https://pixelscientia.com/podcast/discovering-the-microbiome-with-leo-grady-from-jona/</link>
      <description>
        <![CDATA[<p>What if AI could decode the mysteries of your microbiome for a healthier you? In this episode, I sit down with Leo Grady, Founder and CEO of Jona, to discuss his groundbreaking work in microbiome research. Jona is a health technology company that specializes in microbiome profiling and analysis. It offers microbiome testing kits for individuals to use at home, along with AI-powered analysis of the associated microbiome data. In our conversation, we delve into the human microbiome and how Jona is harnessing the power of AI to unlock its secrets and revolutionize healthcare practices. Discover how Jona bridges the gap between research and clinical practice and utilizes deep shotgun metagenomic sequencing. We discuss why he thinks AI is a critical technology for decoding the microbiome, how Jona is able to connect research findings to microbiome profiles, and the company’s approach to model validation. Gain insights into the evolving landscape of AI in healthcare, the number one barrier to clinical translation and adoption of AI technology, what needs to be done to overcome it, and much more.</p><p><br></p><p><strong>Key Points:</strong></p><ul><li>Background about Leo and what motivated him to start Jona.</li><li>He explains the complexity of the microbiome and its role in human health.</li><li>Hear more about Jona and how the company leverages AI for data analysis.</li><li>How Jona applies models to analyze microbiome data and medical literature.</li><li>The technical nuances and validation processes behind the company’s approach.</li><li>Learn about the challenges of building models to elucidate microbiome data.</li><li>Explore the intricacies of validating the company’s groundbreaking technology.</li><li>Advancements in AI and machine learning that he is most excited about.</li><li>Leo shares advice for leaders of AI-powered startups.</li><li>Uncover the number one barrier to AI adoption: payment. 
</li><li>What the future looks like for Jona and what the company aims to achieve.</li></ul><p><br></p><p><strong>Quotes:</strong></p><p>“What's really remarkable to me about the microbiome is that it's been linked to almost every aspect of human health.” — Leo Grady</p><p>“There are a lot of challenges that forced us to really develop new kinds of [machine learning] techniques that are really suited to this problem. We can't just rely on taking what's out there today.” — Leo Grady</p><p><br></p><p>“The AI is doing that extraction. We have human oversight to make corrections to it. But once that paper has been extracted correctly, then we don't need to look at it again. It’s a one-time review process on every study.” — Leo Grady</p><p><br></p><p>“I think the biggest challenges with AI and healthcare today are no longer technical, and they're no longer regulatory. The fact is that with current AI technology and enough data, we can solve almost any AI problem that we want to.” — Leo Grady</p><p><br></p><p><strong>Links:</strong></p><p><a href="https://www.linkedin.com/in/leo-grady-a745517/">Leo Grady on LinkedIn</a></p><p><a href="https://jona.health">Jona</a></p><p><br><strong>Resources for Computer Vision Teams:</strong></p><p><a href="https://www.linkedin.com/in/hdcouture/">LinkedIn</a> – Connect with Heather.</p><p><a href="https://pixelscientia.com/newsletter/">Computer Vision Insights Newsletter</a> – A biweekly newsletter to help bring the latest machine learning and computer vision research to applications in people and planetary health.</p><p><a href="http://pixelscientia.com/services/strategy-session/">Computer Vision Strategy Session</a> – Not sure how to advance your computer vision project? Get unstuck with a clear set of next steps. 
Schedule a 1 hour strategy session now to advance your project.</p><p><a href="https://pixelscientia.com/services/foundation-model-assessment/">Foundation Model Assessment</a> – Foundation models are popping up everywhere – do you need one for your proprietary image dataset? Get a clear perspective on whether you can benefit from a domain-specific foundation model.</p>]]>
      </description>
      <content:encoded>
        <![CDATA[<p>What if AI could decode the mysteries of your microbiome for a healthier you? In this episode, I sit down with Leo Grady, Founder and CEO of Jona, to discuss his groundbreaking work in microbiome research. Jona is a health technology company that specializes in microbiome profiling and analysis. It offers microbiome testing kits for individuals to use at home, along with AI-powered analysis of the associated microbiome data. In our conversation, we delve into the human microbiome and how Jona is harnessing the power of AI to unlock its secrets and revolutionize healthcare practices. Discover how Jona bridges the gap between research and clinical practice and utilizes deep shotgun metagenomic sequencing. We discuss why he thinks AI is a critical technology for decoding the microbiome, how Jona is able to connect research findings to microbiome profiles, and the company’s approach to model validation. Gain insights into the evolving landscape of AI in healthcare, the number one barrier to clinical translation and adoption of AI technology, what needs to be done to overcome it, and much more.</p><p><br></p><p><strong>Key Points:</strong></p><ul><li>Background about Leo and what motivated him to start Jona.</li><li>He explains the complexity of the microbiome and its role in human health.</li><li>Hear more about Jona and how the company leverages AI for data analysis.</li><li>How Jona applies models to analyze microbiome data and medical literature.</li><li>The technical nuances and validation processes behind the company’s approach.</li><li>Learn about the challenges of building models to elucidate microbiome data.</li><li>Explore the intricacies of validating the company’s groundbreaking technology.</li><li>Advancements in AI and machine learning that he is most excited about.</li><li>Leo shares advice for leaders of AI-powered startups.</li><li>Uncover the number one barrier to AI adoption: payment. 
</li><li>What the future looks like for Jona and what the company aims to achieve.</li></ul><p><br></p><p><strong>Quotes:</strong></p><p>“What's really remarkable to me about the microbiome is that it's been linked to almost every aspect of human health.” — Leo Grady</p><p>“There are a lot of challenges that forced us to really develop new kinds of [machine learning] techniques that are really suited to this problem. We can't just rely on taking what's out there today.” — Leo Grady</p><p><br></p><p>“The AI is doing that extraction. We have human oversight to make corrections to it. But once that paper has been extracted correctly, then we don't need to look at it again. It’s a one-time review process on every study.” — Leo Grady</p><p><br></p><p>“I think the biggest challenges with AI and healthcare today are no longer technical, and they're no longer regulatory. The fact is that with current AI technology and enough data, we can solve almost any AI problem that we want to.” — Leo Grady</p><p><br></p><p><strong>Links:</strong></p><p><a href="https://www.linkedin.com/in/leo-grady-a745517/">Leo Grady on LinkedIn</a></p><p><a href="https://jona.health">Jona</a></p><p><br><strong>Resources for Computer Vision Teams:</strong></p><p><a href="https://www.linkedin.com/in/hdcouture/">LinkedIn</a> – Connect with Heather.</p><p><a href="https://pixelscientia.com/newsletter/">Computer Vision Insights Newsletter</a> – A biweekly newsletter to help bring the latest machine learning and computer vision research to applications in people and planetary health.</p><p><a href="http://pixelscientia.com/services/strategy-session/">Computer Vision Strategy Session</a> – Not sure how to advance your computer vision project? Get unstuck with a clear set of next steps. 
Schedule a 1 hour strategy session now to advance your project.</p><p><a href="https://pixelscientia.com/services/foundation-model-assessment/">Foundation Model Assessment</a> – Foundation models are popping up everywhere – do you need one for your proprietary image dataset? Get a clear perspective on whether you can benefit from a domain-specific foundation model.</p>]]>
      </content:encoded>
      <pubDate>Mon, 22 Apr 2024 06:00:00 -0400</pubDate>
      <author>Heather D. Couture</author>
      <enclosure url="https://2.gum.fm/op3.dev/e/pdcn.co/e/pdst.fm/e/dts.podtrac.com/redirect.mp3/media.transistor.fm/6f34cd40/1770afaf.mp3" length="22156600" type="audio/mpeg"/>
      <itunes:author>Heather D. Couture</itunes:author>
      <itunes:image href="https://img.transistor.fm/zbb4DTDglBEhDDhgSZtYvg9SrbWkCm14w_0OocSQxw0/rs:fill:0:0:1/w:1400/h:1400/q:60/mb:500000/aHR0cHM6Ly9pbWct/dXBsb2FkLXByb2R1/Y3Rpb24udHJhbnNp/c3Rvci5mbS85OWYy/NzA0ZDk2OTRlZDE4/NDYxZWU4OGQwZDk3/MmVkMC5qcGVn.jpg"/>
      <itunes:duration>1381</itunes:duration>
      <itunes:summary>
        <![CDATA[<p>What if AI could decode the mysteries of your microbiome for a healthier you? In this episode, I sit down with Leo Grady, Founder and CEO of Jona, to discuss his groundbreaking work in microbiome research. Jona is a health technology company that specializes in microbiome profiling and analysis. It offers microbiome testing kits for individuals to use at home, along with AI-powered analysis of the associated microbiome data. In our conversation, we delve into the human microbiome and how Jona is harnessing the power of AI to unlock its secrets and revolutionize healthcare practices. Discover how Jona bridges the gap between research and clinical practice and utilizes deep shotgun metagenomic sequencing. We discuss why he thinks AI is a critical technology for decoding the microbiome, how Jona is able to connect research findings to microbiome profiles, and the company’s approach to model validation. Gain insights into the evolving landscape of AI in healthcare, the number one barrier to clinical translation and adoption of AI technology, what needs to be done to overcome it, and much more.</p><p><br></p><p><strong>Key Points:</strong></p><ul><li>Background about Leo and what motivated him to start Jona.</li><li>He explains the complexity of the microbiome and its role in human health.</li><li>Hear more about Jona and how the company leverages AI for data analysis.</li><li>How Jona applies models to analyze microbiome data and medical literature.</li><li>The technical nuances and validation processes behind the company’s approach.</li><li>Learn about the challenges of building models to elucidate microbiome data.</li><li>Explore the intricacies of validating the company’s groundbreaking technology.</li><li>Advancements in AI and machine learning that he is most excited about.</li><li>Leo shares advice for leaders of AI-powered startups.</li><li>Uncover the number one barrier to AI adoption: payment. 
</li><li>What the future looks like for Jona and what the company aims to achieve.</li></ul><p><br></p><p><strong>Quotes:</strong></p><p>“What's really remarkable to me about the microbiome is that it's been linked to almost every aspect of human health.” — Leo Grady</p><p>“There are a lot of challenges that forced us to really develop new kinds of [machine learning] techniques that are really suited to this problem. We can't just rely on taking what's out there today.” — Leo Grady</p><p><br></p><p>“The AI is doing that extraction. We have human oversight to make corrections to it. But once that paper has been extracted correctly, then we don't need to look at it again. It’s a one-time review process on every study.” — Leo Grady</p><p><br></p><p>“I think the biggest challenges with AI and healthcare today are no longer technical, and they're no longer regulatory. The fact is that with current AI technology and enough data, we can solve almost any AI problem that we want to.” — Leo Grady</p><p><br></p><p><strong>Links:</strong></p><p><a href="https://www.linkedin.com/in/leo-grady-a745517/">Leo Grady on LinkedIn</a></p><p><a href="https://jona.health">Jona</a></p><p><br><strong>Resources for Computer Vision Teams:</strong></p><p><a href="https://www.linkedin.com/in/hdcouture/">LinkedIn</a> – Connect with Heather.</p><p><a href="https://pixelscientia.com/newsletter/">Computer Vision Insights Newsletter</a> – A biweekly newsletter to help bring the latest machine learning and computer vision research to applications in people and planetary health.</p><p><a href="http://pixelscientia.com/services/strategy-session/">Computer Vision Strategy Session</a> – Not sure how to advance your computer vision project? Get unstuck with a clear set of next steps. 
Schedule a 1 hour strategy session now to advance your project.</p><p><a href="https://pixelscientia.com/services/foundation-model-assessment/">Foundation Model Assessment</a> – Foundation models are popping up everywhere – do you need one for your proprietary image dataset? Get a clear perspective on whether you can benefit from a domain-specific foundation model.</p>]]>
      </itunes:summary>
      <itunes:keywords>machine learning, microbiome, gut health, LLMs</itunes:keywords>
      <itunes:explicit>No</itunes:explicit>
      <podcast:transcript url="https://share.transistor.fm/s/6f34cd40/transcript.txt" type="text/plain"/>
    </item>
    <item>
      <title>Monitoring Forests with David Marvin from Planet</title>
      <itunes:episode>78</itunes:episode>
      <podcast:episode>78</podcast:episode>
      <itunes:title>Monitoring Forests with David Marvin from Planet</itunes:title>
      <itunes:episodeType>full</itunes:episodeType>
      <guid isPermaLink="false">f50070a8-0006-4c3c-99ec-68bea645140c</guid>
      <link>https://pixelscientia.com/podcast/monitoring-forests-with-david-marvin-from-planet/</link>
      <description>
        <![CDATA[<p>Bringing transparency and accuracy to the marketplace by producing high-quality data on all types of hard problems is a main focus for today’s guest and the company he works for. I am pleased to welcome David Marvin to Impact AI. David was the Co-Founder and CEO of Salo Sciences, which was acquired by Planet last year, and is now the Product Lead for Forest Ecosystems there! He joins me today to talk about monitoring forests. We delve into his background and path to Salo Sciences and their eventual acquisition by Planet; including the original mission and vision and what they worked to accomplish at Salo. David then explains his goals and focus at Planet, and unpacks the types of satellite imagery, models, and sensors they incorporate into their data and outputs. He highlights their approach to validation, how they are reducing bias, and how they are integrating extensive knowledge to empower their machine learning developers to create powerful models.</p><p><br></p><p><strong>Key Points:</strong></p><ul><li>David shares details about his background and path to Salo Sciences and Planet.</li><li>The original vision and mission of Salo Sciences and what they did there.</li><li>He explains how they leveraged large-scale airborne LiDAR collections and deep learning to create maps of vegetation fuels.</li><li>His goals and focus at Planet.</li><li>David unpacks the types of satellite imagery, models, and sensors they incorporate into their data and outputs.</li><li>How they validate that their models work in places where they do not have Airborne LiDAR.</li><li>Reducing the bias that results from only having data in a heterogeneous distribution of LiDAR sites around the world.</li><li>How they integrate their extensive knowledge to empower their machine-learning developers in creating powerful models.</li><li>The business benefits he’s seen from publishing and making it a priority.</li><li>His advice to other leaders of AI-powered 
startups.</li><li>His thoughts on the impact of the forest monitoring efforts at Planet in three to five years.</li></ul><p><br></p><p><strong>Quotes:</strong></p><p>“A company like Planet was essentially probably the only company we would have really ever been acquired by just given their vision and the fact that they have their own satellites and we’re a satellite software company.” — David Marvin</p><p><br></p><p>“[At Salo Sciences] we leveraged high-quality airborne LiDAR measurements of forests all over California. Airborne LiDAR is one of these technologies, these sensors, that was on that airplane back in my post-doc lab. It shoots out hundreds of thousands of pulses of laser light per second and reflects back to the sensor, and it can basically recreate in three dimensions a forest, or a city, whatever your mapping target is. It's extremely precise. It's centimeter-level accuracy, and it's very high-quality data. We consider that the gold standard of forest measurement.” — David Marvin</p><p><br>“Ultimately, we want to produce a near-tree-level map of the world's forests, and we're well on our way to doing that and expect to be releasing that later this summer, or in the fall.” — David Marvin</p><p><br></p><p>“We approach the validation aspect from a few different angles, trying to source as many different independent data sets as possible to do validation. 
Then we also like to do comparisons to well-known public data sets; either from academia or from governments.” — David Marvin</p><p><br>“You really do have to have the three legs of the stool to be able to build a quality operational product that is meant for forest monitoring.” — David Marvin</p><p><br></p><p>“Making sure you have scientists on your team, making sure you're still active in the scientific publishing community, that you're up on the latest papers that are coming out, and basically acting like a scientist in an industry position is crucial to make any product work; especially in branding markets, like forest monitoring and carbon markets.” — David Marvin</p><p><br></p><p><strong>Links:</strong></p><p><a href="http://www.davidcmarvin.org/">David Marvin</a></p><p><a href="https://www.linkedin.com/in/dmarvs/">David Marvin on LinkedIn</a></p><p><a href="https://twitter.com/dmarvs">David Marvin on X</a></p><p><a href="https://salo.ai/">Salo Sciences</a></p><p><a href="https://www.planet.com/">Planet</a></p><p><br><strong>Resources for Computer Vision Teams:</strong></p><p><a href="https://www.linkedin.com/in/hdcouture/">LinkedIn</a> – Connect with Heather.</p><p><a href="https://pixelscientia.com/newsletter/">Computer Vision Insights Newsletter</a> – A biweekly newsletter to help bring the latest machine learning and computer vision research to applications in people and planetary health.</p><p><a href="http://pixelscientia.com/services/strategy-session/">Computer Vision Strategy Session</a> – Not sure how to advance your computer vision project? Get unstuck with a clear set of next steps. Schedule a 1 hour strategy session now to advance your project.</p><p><a href="https://pixelscientia.com/services/foundation-model-assessment/">Foundation Model Assessment</a> – Foundation models are popping up everywhere – do you need one for your proprietary image dataset? 
Get a clear perspective on whether you can benefit from a domain-specific foundation model.</p>]]>
      </description>
      <content:encoded>
        <![CDATA[<p>Bringing transparency and accuracy to the marketplace by producing high-quality data on all types of hard problems is a main focus for today’s guest and the company he works for. I am pleased to welcome David Marvin to Impact AI. David was the Co-Founder and CEO of Salo Sciences, which was acquired by Planet last year, and is now the Product Lead for Forest Ecosystems there! He joins me today to talk about monitoring forests. We delve into his background and path to Salo Sciences and their eventual acquisition by Planet; including the original mission and vision and what they worked to accomplish at Salo. David then explains his goals and focus at Planet, and unpacks the types of satellite imagery, models, and sensors they incorporate into their data and outputs. He highlights their approach to validation, how they are reducing bias, and how they are integrating extensive knowledge to empower their machine learning developers to create powerful models.</p><p><br></p><p><strong>Key Points:</strong></p><ul><li>David shares details about his background and path to Salo Sciences and Planet.</li><li>The original vision and mission of Salo Sciences and what they did there.</li><li>He explains how they leveraged large-scale airborne LiDAR collections and deep learning to create maps of vegetation fuels.</li><li>His goals and focus at Planet.</li><li>David unpacks the types of satellite imagery, models, and sensors they incorporate into their data and outputs.</li><li>How they validate that their models work in places where they do not have Airborne LiDAR.</li><li>Reducing the bias that results from only having data in a heterogeneous distribution of LiDAR sites around the world.</li><li>How they integrate their extensive knowledge to empower their machine-learning developers in creating powerful models.</li><li>The business benefits he’s seen from publishing and making it a priority.</li><li>His advice to other leaders of AI-powered 
startups.</li><li>His thoughts on the impact of the forest monitoring efforts at Planet in three to five years.</li></ul><p><br></p><p><strong>Quotes:</strong></p><p>“A company like Planet was essentially probably the only company we would have really ever been acquired by just given their vision and the fact that they have their own satellites and we’re a satellite software company.” — David Marvin</p><p><br></p><p>“[At Salo Sciences] we leveraged high-quality airborne LiDAR measurements of forests all over California. Airborne LiDAR is one of these technologies, these sensors, that was on that airplane back in my post-doc lab. It shoots out hundreds of thousands of pulses of laser light per second and reflects back to the sensor, and it can basically recreate in three dimensions a forest, or a city, whatever your mapping target is. It's extremely precise. It's centimeter-level accuracy, and it's very high-quality data. We consider that the gold standard of forest measurement.” — David Marvin</p><p><br>“Ultimately, we want to produce a near-tree-level map of the world's forests, and we're well on our way to doing that and expect to be releasing that later this summer, or in the fall.” — David Marvin</p><p><br></p><p>“We approach the validation aspect from a few different angles, trying to source as many different independent data sets as possible to do validation. 
Then we also like to do comparisons to well-known public data sets; either from academia or from governments.” — David Marvin</p><p><br>“You really do have to have the three legs of the stool to be able to build a quality operational product that is meant for forest monitoring.” — David Marvin</p><p><br></p><p>“Making sure you have scientists on your team, making sure you're still active in the scientific publishing community, that you're up on the latest papers that are coming out, and basically acting like a scientist in an industry position is crucial to make any product work; especially in branding markets, like forest monitoring and carbon markets.” — David Marvin</p><p><br></p><p><strong>Links:</strong></p><p><a href="http://www.davidcmarvin.org/">David Marvin</a></p><p><a href="https://www.linkedin.com/in/dmarvs/">David Marvin on LinkedIn</a></p><p><a href="https://twitter.com/dmarvs">David Marvin on X</a></p><p><a href="https://salo.ai/">Salo Sciences</a></p><p><a href="https://www.planet.com/">Planet</a></p><p><br><strong>Resources for Computer Vision Teams:</strong></p><p><a href="https://www.linkedin.com/in/hdcouture/">LinkedIn</a> – Connect with Heather.</p><p><a href="https://pixelscientia.com/newsletter/">Computer Vision Insights Newsletter</a> – A biweekly newsletter to help bring the latest machine learning and computer vision research to applications in people and planetary health.</p><p><a href="http://pixelscientia.com/services/strategy-session/">Computer Vision Strategy Session</a> – Not sure how to advance your computer vision project? Get unstuck with a clear set of next steps. Schedule a 1 hour strategy session now to advance your project.</p><p><a href="https://pixelscientia.com/services/foundation-model-assessment/">Foundation Model Assessment</a> – Foundation models are popping up everywhere – do you need one for your proprietary image dataset? 
Get a clear perspective on whether you can benefit from a domain-specific foundation model.</p>]]>
      </content:encoded>
      <pubDate>Mon, 15 Apr 2024 06:00:00 -0400</pubDate>
      <author>Heather D. Couture</author>
      <enclosure url="https://2.gum.fm/op3.dev/e/pdcn.co/e/pdst.fm/e/dts.podtrac.com/redirect.mp3/media.transistor.fm/98819e43/ae3f9884.mp3" length="65106488" type="audio/mpeg"/>
      <itunes:author>Heather D. Couture</itunes:author>
      <itunes:image href="https://img.transistor.fm/wOsTvsxBI9dB35QPK5D8etDlic4tlNJI3g6fpAxDk5c/rs:fill:0:0:1/w:1400/h:1400/q:60/mb:500000/aHR0cHM6Ly9pbWct/dXBsb2FkLXByb2R1/Y3Rpb24udHJhbnNp/c3Rvci5mbS82YzBi/MGYzMTBlYTM0NjRk/YTdhOTMyMDQwMDMy/MjUxZi5wbmc.jpg"/>
      <itunes:duration>2712</itunes:duration>
      <itunes:summary>
        <![CDATA[<p>Bringing transparency and accuracy to the marketplace by producing high-quality data on all types of hard problems is a main focus for today’s guest and the company he works for. I am pleased to welcome David Marvin to Impact AI. David was the Co-Founder and CEO of Salo Sciences, which was acquired by Planet last year, and is now the Product Lead for Forest Ecosystems there! He joins me today to talk about monitoring forests. We delve into his background and path to Salo Sciences and their eventual acquisition by Planet; including the original mission and vision and what they worked to accomplish at Salo. David then explains his goals and focus at Planet, and unpacks the types of satellite imagery, models, and sensors they incorporate into their data and outputs. He highlights their approach to validation, how they are reducing bias, and how they are integrating extensive knowledge to empower their machine learning developers to create powerful models.</p><p><br></p><p><strong>Key Points:</strong></p><ul><li>David shares details about his background and path to Salo Sciences and Planet.</li><li>The original vision and mission of Salo Sciences and what they did there.</li><li>He explains how they leveraged large-scale airborne LiDAR collections and deep learning to create maps of vegetation fuels.</li><li>His goals and focus at Planet.</li><li>David unpacks the types of satellite imagery, models, and sensors they incorporate into their data and outputs.</li><li>How they validate that their models work in places where they do not have Airborne LiDAR.</li><li>Reducing the bias that results from only having data in a heterogeneous distribution of LiDAR sites around the world.</li><li>How they integrate their extensive knowledge to empower their machine-learning developers in creating powerful models.</li><li>The business benefits he’s seen from publishing and making it a priority.</li><li>His advice to other leaders of AI-powered 
startups.</li><li>His thoughts on the impact of the forest monitoring efforts at Planet in three to five years.</li></ul><p><br></p><p><strong>Quotes:</strong></p><p>“A company like Planet was essentially probably the only company we would have really ever been acquired by just given their vision and the fact that they have their own satellites and we’re a satellite software company.” — David Marvin</p><p><br></p><p>“[At Salo Sciences] we leveraged high-quality airborne LiDAR measurements of forests all over California. Airborne LiDAR is one of these technologies, these sensors, that was on that airplane back in my post-doc lab. It shoots out hundreds of thousands of pulses of laser light per second and reflects back to the sensor, and it can basically recreate in three dimensions a forest, or a city, whatever your mapping target is. It's extremely precise. It's centimeter-level accuracy, and it's very high-quality data. We consider that the gold standard of forest measurement.” — David Marvin</p><p><br>“Ultimately, we want to produce a near-tree-level map of the world's forests, and we're well on our way to doing that and expect to be releasing that later this summer, or in the fall.” — David Marvin</p><p><br></p><p>“We approach the validation aspect from a few different angles, trying to source as many different independent data sets as possible to do validation. 
Then we also like to do comparisons to well-known public data sets; either from academia or from governments.” — David Marvin</p><p><br>“You really do have to have the three legs of the stool to be able to build a quality operational product that is meant for forest monitoring.” — David Marvin</p><p><br></p><p>“Making sure you have scientists on your team, making sure you're still active in the scientific publishing community, that you're up on the latest papers that are coming out, and basically acting like a scientist in an industry position is crucial to make any product work; especially in branding markets, like forest monitoring and carbon markets.” — David Marvin</p><p><br></p><p><strong>Links:</strong></p><p><a href="http://www.davidcmarvin.org/">David Marvin</a></p><p><a href="https://www.linkedin.com/in/dmarvs/">David Marvin on LinkedIn</a></p><p><a href="https://twitter.com/dmarvs">David Marvin on X</a></p><p><a href="https://salo.ai/">Salo Sciences</a></p><p><a href="https://www.planet.com/">Planet</a></p><p><br><strong>Resources for Computer Vision Teams:</strong></p><p><a href="https://www.linkedin.com/in/hdcouture/">LinkedIn</a> – Connect with Heather.</p><p><a href="https://pixelscientia.com/newsletter/">Computer Vision Insights Newsletter</a> – A biweekly newsletter to help bring the latest machine learning and computer vision research to applications in people and planetary health.</p><p><a href="http://pixelscientia.com/services/strategy-session/">Computer Vision Strategy Session</a> – Not sure how to advance your computer vision project? Get unstuck with a clear set of next steps. Schedule a 1 hour strategy session now to advance your project.</p><p><a href="https://pixelscientia.com/services/foundation-model-assessment/">Foundation Model Assessment</a> – Foundation models are popping up everywhere – do you need one for your proprietary image dataset? 
Get a clear perspective on whether you can benefit from a domain-specific foundation model.</p>]]>
      </itunes:summary>
      <itunes:keywords>machine learning, deep learning, forestry, remote sensing</itunes:keywords>
      <itunes:explicit>No</itunes:explicit>
      <podcast:transcript url="https://share.transistor.fm/s/98819e43/transcript.txt" type="text/plain"/>
    </item>
    <item>
      <title>Foundation Models for Pathology with Razik Yousfi from Paige</title>
      <itunes:episode>77</itunes:episode>
      <podcast:episode>77</podcast:episode>
      <itunes:title>Foundation Models for Pathology with Razik Yousfi from Paige</itunes:title>
      <itunes:episodeType>full</itunes:episodeType>
      <guid isPermaLink="false">d9f60b6b-ece8-4fce-8d21-4b0f489f40e8</guid>
      <link>https://pixelscientia.com/podcast/foundation-models-for-pathology-with-razik-yousfi-from-paige/</link>
      <description>
        <![CDATA[<p>Foundation models have been at the forefront of AI discussions for a while now and joining me today on Impact AI is a leader in the creation of foundation models for pathology, Senior Vice President of Technology at Paige AI, Razik Yousfi. Tuning in, you’ll hear all about Razik’s incredible background leading him to Paige, what the company does and how it’s revolutionizing cancer care, and the role machine learning plays in pathology. Razik goes on to explain what foundation models are, why they are so helpful, how to train one, the differences in training one for pathology specifically, and how they use foundation models at Paige AI. We then delve into the challenges associated with the creation of foundation models before my guest shares some advice for leaders in machine learning. Finally, Razik tells us where he sees Paige AI in the next few years.</p><p><br></p><p><strong>Key Points:</strong></p><ul><li>Introducing today’s guest, Razik Yousfi.</li><li>An overview of Razik’s background and what led him to become Senior Vice President of Technology at Paige AI. </li><li>What Paige does and why it’s important for cancer care. </li><li>The role machine learning plays in pathology. </li><li>Razik tells us what a foundation model is, why it’s useful, and what it takes to train one. </li><li>The subtle differences in training a foundation model for pathology versus other data. </li><li>How they are using foundation models at Paige AI. </li><li>Razik discusses what the future of foundation models for pathology looks like. </li><li>Why Razik doesn’t suggest that every organization build a foundation model. </li><li>Our guest shares some advice for leaders of machine learning teams. </li><li>Where he sees the impact of Paige AI in the next three to five years. </li></ul><p><br></p><p><strong>Quotes:</strong></p><p>“Paige is focusing on digital and computational pathology. 
In other words, we really bring AI and novel AI solutions to the field of pathology to help pathologists make better-informed decisions.” — Razik Yousfi</p><p><br></p><p>“A foundational model is a model trained on a very large set of data. The idea there is that you can, in turn, use that foundation model to build a wide range of downstream applications.” — Razik Yousfi</p><p><br></p><p>“Building a foundation model is not easy. So, I wouldn't necessarily recommend to every organization to build a foundation model.” — Razik Yousfi</p><p><br></p><p><strong>Links:</strong></p><p><a href="https://www.linkedin.com/in/yousfi/">Razik Yousfi on LinkedIn</a></p><p><a href="mailto:razik.yousfi@paige.ai">Razik Yousfi Email Address</a></p><p><a href="https://twitter.com/razikyousfi">Razik Yousfi on X</a></p><p><a href="https://paige.ai/">Paige AI</a></p><p><br><strong>Resources for Computer Vision Teams:</strong></p><p><a href="https://www.linkedin.com/in/hdcouture/">LinkedIn</a> – Connect with Heather.</p><p><a href="https://pixelscientia.com/newsletter/">Computer Vision Insights Newsletter</a> – A biweekly newsletter to help bring the latest machine learning and computer vision research to applications in people and planetary health.</p><p><a href="http://pixelscientia.com/services/strategy-session/">Computer Vision Strategy Session</a> – Not sure how to advance your computer vision project? Get unstuck with a clear set of next steps. Schedule a 1 hour strategy session now to advance your project.</p><p><a href="https://pixelscientia.com/services/foundation-model-assessment/">Foundation Model Assessment</a> – Foundation models are popping up everywhere – do you need one for your proprietary image dataset? Get a clear perspective on whether you can benefit from a domain-specific foundation model.</p>]]>
      </description>
      <content:encoded>
        <![CDATA[<p>Foundation models have been at the forefront of AI discussions for a while now and joining me today on Impact AI is a leader in the creation of foundation models for pathology, Senior Vice President of Technology at Paige AI, Razik Yousfi. Tuning in, you’ll hear all about Razik’s incredible background leading him to Paige, what the company does and how it’s revolutionizing cancer care, and the role machine learning plays in pathology. Razik goes on to explain what foundation models are, why they are so helpful, how to train one, the differences in training one for pathology specifically, and how they use foundation models at Paige AI. We then delve into the challenges associated with the creation of foundation models before my guest shares some advice for leaders in machine learning. Finally, Razik tells us where he sees Paige AI in the next few years.</p><p><br></p><p><strong>Key Points:</strong></p><ul><li>Introducing today’s guest, Razik Yousfi.</li><li>An overview of Razik’s background and what led him to become Senior Vice President of Technology at Paige AI. </li><li>What Paige does and why it’s important for cancer care. </li><li>The role machine learning plays in pathology. </li><li>Razik tells us what a foundation model is, why it’s useful, and what it takes to train one. </li><li>The subtle differences in training a foundation model for pathology versus other data. </li><li>How they are using foundation models at Paige AI. </li><li>Razik discusses what the future of foundation models for pathology looks like. </li><li>Why Razik doesn’t suggest that every organization build a foundation model. </li><li>Our guest shares some advice for leaders of machine learning teams. </li><li>Where he sees the impact of Paige AI in the next three to five years. </li></ul><p><br></p><p><strong>Quotes:</strong></p><p>“Paige is focusing on digital and computational pathology. 
In other words, we really bring AI and novel AI solutions to the field of pathology to help pathologists make better-informed decisions.” — Razik Yousfi</p><p><br></p><p>“A foundational model is a model trained on a very large set of data. The idea there is that you can, in turn, use that foundation model to build a wide range of downstream applications.” — Razik Yousfi</p><p><br></p><p>“Building a foundation model is not easy. So, I wouldn't necessarily recommend to every organization to build a foundation model.” — Razik Yousfi</p><p><br></p><p><strong>Links:</strong></p><p><a href="https://www.linkedin.com/in/yousfi/">Razik Yousfi on LinkedIn</a></p><p><a href="mailto:razik.yousfi@paige.ai">Razik Yousfi Email Address</a></p><p><a href="https://twitter.com/razikyousfi">Razik Yousfi on X</a></p><p><a href="https://paige.ai/">Paige AI</a></p><p><br><strong>Resources for Computer Vision Teams:</strong></p><p><a href="https://www.linkedin.com/in/hdcouture/">LinkedIn</a> – Connect with Heather.</p><p><a href="https://pixelscientia.com/newsletter/">Computer Vision Insights Newsletter</a> – A biweekly newsletter to help bring the latest machine learning and computer vision research to applications in people and planetary health.</p><p><a href="http://pixelscientia.com/services/strategy-session/">Computer Vision Strategy Session</a> – Not sure how to advance your computer vision project? Get unstuck with a clear set of next steps. Schedule a 1 hour strategy session now to advance your project.</p><p><a href="https://pixelscientia.com/services/foundation-model-assessment/">Foundation Model Assessment</a> – Foundation models are popping up everywhere – do you need one for your proprietary image dataset? Get a clear perspective on whether you can benefit from a domain-specific foundation model.</p>]]>
      </content:encoded>
      <pubDate>Mon, 08 Apr 2024 06:00:00 -0400</pubDate>
      <author>Heather D. Couture</author>
      <enclosure url="https://2.gum.fm/op3.dev/e/pdcn.co/e/pdst.fm/e/dts.podtrac.com/redirect.mp3/media.transistor.fm/0c04e219/8af7ce05.mp3" length="34945269" type="audio/mpeg"/>
      <itunes:author>Heather D. Couture</itunes:author>
      <itunes:image href="https://img.transistor.fm/fAFbPbK-uaErLPZU8GD1DjSmikS7oKCqIgdS21fLkGg/rs:fill:0:0:1/w:1400/h:1400/q:60/mb:500000/aHR0cHM6Ly9pbWct/dXBsb2FkLXByb2R1/Y3Rpb24udHJhbnNp/c3Rvci5mbS9lcGlz/b2RlLzE4MDAwMDMv/MTcxMDk3MTYyMS1h/cnR3b3JrLmpwZw.jpg"/>
      <itunes:duration>1452</itunes:duration>
      <itunes:summary>
        <![CDATA[<p>Foundation models have been at the forefront of AI discussions for a while now and joining me today on Impact AI is a leader in the creation of foundation models for pathology, Senior Vice President of Technology at Paige AI, Razik Yousfi. Tuning in, you’ll hear all about Razik’s incredible background leading him to Paige, what the company does and how it’s revolutionizing cancer care, and the role machine learning plays in pathology. Razik goes on to explain what foundation models are, why they are so helpful, how to train one, the differences in training one for pathology specifically, and how they use foundation models at Paige AI. We then delve into the challenges associated with the creation of foundation models before my guest shares some advice for leaders in machine learning. Finally, Razik tells us where he sees Paige AI in the next few years.</p><p><br></p><p><strong>Key Points:</strong></p><ul><li>Introducing today’s guest, Razik Yousfi.</li><li>An overview of Razik’s background and what led him to become Senior Vice President of Technology at Paige AI. </li><li>What Paige does and why it’s important for cancer care. </li><li>The role machine learning plays in pathology. </li><li>Razik tells us what a foundation model is, why it’s useful, and what it takes to train one. </li><li>The subtle differences in training a foundation model for pathology versus other data. </li><li>How they are using foundation models at Paige AI. </li><li>Razik discusses what the future of foundation models for pathology looks like. </li><li>Why Razik doesn’t suggest that every organization build a foundation model. </li><li>Our guest shares some advice for leaders of machine learning teams. </li><li>Where he sees the impact of Paige AI in the next three to five years. </li></ul><p><br></p><p><strong>Quotes:</strong></p><p>“Paige is focusing on digital and computational pathology. 
In other words, we really bring AI and novel AI solutions to the field of pathology to help pathologists make better-informed decisions.” — Razik Yousfi</p><p><br></p><p>“A foundational model is a model trained on a very large set of data. The idea there is that you can, in turn, use that foundation model to build a wide range of downstream applications.” — Razik Yousfi</p><p><br></p><p>“Building a foundation model is not easy. So, I wouldn't necessarily recommend to every organization to build a foundation model.” — Razik Yousfi</p><p><br></p><p><strong>Links:</strong></p><p><a href="https://www.linkedin.com/in/yousfi/">Razik Yousfi on LinkedIn</a></p><p><a href="mailto:razik.yousfi@paige.ai">Razik Yousfi Email Address</a></p><p><a href="https://twitter.com/razikyousfi">Razik Yousfi on X</a></p><p><a href="https://paige.ai/">Paige AI</a></p><p><br><strong>Resources for Computer Vision Teams:</strong></p><p><a href="https://www.linkedin.com/in/hdcouture/">LinkedIn</a> – Connect with Heather.</p><p><a href="https://pixelscientia.com/newsletter/">Computer Vision Insights Newsletter</a> – A biweekly newsletter to help bring the latest machine learning and computer vision research to applications in people and planetary health.</p><p><a href="http://pixelscientia.com/services/strategy-session/">Computer Vision Strategy Session</a> – Not sure how to advance your computer vision project? Get unstuck with a clear set of next steps. Schedule a 1 hour strategy session now to advance your project.</p><p><a href="https://pixelscientia.com/services/foundation-model-assessment/">Foundation Model Assessment</a> – Foundation models are popping up everywhere – do you need one for your proprietary image dataset? Get a clear perspective on whether you can benefit from a domain-specific foundation model.</p>]]>
      </itunes:summary>
      <itunes:keywords>machine learning, deep learning, pathology, foundation models, histopathology</itunes:keywords>
      <itunes:explicit>No</itunes:explicit>
      <podcast:transcript url="https://share.transistor.fm/s/0c04e219/transcript.txt" type="text/plain"/>
    </item>
    <item>
      <title>Monitoring Biodiversity with Noelia Jiménez Martínez from NatureMetrics</title>
      <itunes:episode>76</itunes:episode>
      <podcast:episode>76</podcast:episode>
      <itunes:title>Monitoring Biodiversity with Noelia Jiménez Martínez from NatureMetrics</itunes:title>
      <itunes:episodeType>full</itunes:episodeType>
      <guid isPermaLink="false">7d2891b3-2c19-4a39-9b3e-edd11cd35fb7</guid>
      <link>https://pixelscientia.com/podcast/monitoring-biodiversity-with-noelia-jimenez-martinez-from-naturemetrics/</link>
      <description>
        <![CDATA[<p>Biodiversity is not just an ecological concern. As you’ll learn in this episode, it has tangible economic implications too. Today on Impact AI, I'm joined by Dr. Noelia Jimenez Martinez, Head of Insights and Machine Learning at NatureMetrics, to talk about biodiversity monitoring. NatureMetrics is a global nature intelligence technology company providing end-to-end nature monitoring and impact reporting. Powered by eDNA, their Nature Intelligence Platform allows any company to manage its impacts and dependencies on biodiversity at scale, translating the complexities of nature into simple insights that help to inform the best decisions for both the planet and business. Tuning in, you’ll learn about the importance of NatureMetrics’ work, the role that machine learning plays in their technology, and some of the challenges that come with working with sometimes unpredictable data from nature. In my conversation with Noelia, we also touched on why biodiversity is an increasingly urgent imperative for businesses of all kinds, how NatureMetrics is democratizing biodiversity monitoring, and much more!</p><p><br></p><p><strong>Key Points:</strong></p><ul><li>Insight into Noelia's background in astrophysics and how it led her to NatureMetrics.</li><li>What NatureMetrics does, what eDNA is, and why it’s so important for sustainability.</li><li>The major role that machine learning plays in NatureMetrics' technology.</li><li>Specific examples of the types of models that NatureMetrics trains.</li><li>How <em>Jurassic Park</em> helps us understand what eDNA data looks like.</li><li>Different ways that this data is gathered depending on the relevant project.</li><li>Unique challenges of sampling for eDNA and training models based on those datasets.</li><li>How NatureMetrics measures the impact of its technology and makes biodiversity monitoring more accessible and achievable.</li><li>Noelia’s urgent and common sense advice for other leaders of AI-powered 
startups.</li><li>What the future holds for NatureMetrics and how their impact will continue to grow.</li></ul><p><br></p><p><strong>Quotes:</strong></p><p>“I couldn't focus too much on solving galaxy formation with the amount of bad news I was seeing in the climate space and biodiversity collapse. I made a transition – [to] looking for jobs to apply [my astrophysics skills to] related problems in climate and biodiversity.” — Noelia Jiménez Martínez</p><p><br></p><p>“Nature does not seem to behave [as well] as we would want. It might be that you have exactly the same covariates and your model is predicting species, and then you go, and it's not there.” — Noelia Jiménez Martínez</p><p><br></p><p>“[Most companies] will have to report on their sustainability strategies in the world to keep on functioning. In that context, what we can do here is make biodiversity monitoring achievable and democratically easy to access.” — Noelia Jiménez Martínez</p><p><br></p><p>“The success of [an AI startup is] – tied up to the diverse, strong teams you build.” — Noelia Jiménez Martínez</p><p><br></p><p><strong>Links:</strong></p><p><a href="https://www.naturemetrics.com/">NatureMetrics</a></p><p><a href="https://www.linkedin.com/in/dr-noelia-jim%C3%A9nez-mart%C3%ADnez/">Dr. Noelia Jiménez Martínez on LinkedIn</a></p><p><br><strong>Resources for Computer Vision Teams:</strong></p><p><a href="https://www.linkedin.com/in/hdcouture/">LinkedIn</a> – Connect with Heather.</p><p><a href="https://pixelscientia.com/newsletter/">Computer Vision Insights Newsletter</a> – A biweekly newsletter to help bring the latest machine learning and computer vision research to applications in people and planetary health.</p><p><a href="http://pixelscientia.com/services/strategy-session/">Computer Vision Strategy Session</a> – Not sure how to advance your computer vision project? Get unstuck with a clear set of next steps. 
Schedule a 1 hour strategy session now to advance your project.</p><p><a href="https://pixelscientia.com/services/foundation-model-assessment/">Foundation Model Assessment</a> – Foundation models are popping up everywhere – do you need one for your proprietary image dataset? Get a clear perspective on whether you can benefit from a domain-specific foundation model.</p>]]>
      </description>
      <content:encoded>
        <![CDATA[<p>Biodiversity is not just an ecological concern. As you’ll learn in this episode, it has tangible economic implications too. Today on Impact AI, I'm joined by Dr. Noelia Jimenez Martinez, Head of Insights and Machine Learning at NatureMetrics, to talk about biodiversity monitoring. NatureMetrics is a global nature intelligence technology company providing end-to-end nature monitoring and impact reporting. Powered by eDNA, their Nature Intelligence Platform allows any company to manage its impacts and dependencies on biodiversity at scale, translating the complexities of nature into simple insights that help to inform the best decisions for both the planet and business. Tuning in, you’ll learn about the importance of NatureMetrics’ work, the role that machine learning plays in their technology, and some of the challenges that come with working with sometimes unpredictable data from nature. In my conversation with Noelia, we also touched on why biodiversity is an increasingly urgent imperative for businesses of all kinds, how NatureMetrics is democratizing biodiversity monitoring, and much more!</p><p><br></p><p><strong>Key Points:</strong></p><ul><li>Insight into Noelia's background in astrophysics and how it led her to NatureMetrics.</li><li>What NatureMetrics does, what eDNA is, and why it’s so important for sustainability.</li><li>The major role that machine learning plays in NatureMetrics' technology.</li><li>Specific examples of the types of models that NatureMetrics trains.</li><li>How <em>Jurassic Park</em> helps us understand what eDNA data looks like.</li><li>Different ways that this data is gathered depending on the relevant project.</li><li>Unique challenges of sampling for eDNA and training models based on those datasets.</li><li>How NatureMetrics measures the impact of its technology and makes biodiversity monitoring more accessible and achievable.</li><li>Noelia’s urgent and common sense advice for other leaders of AI-powered 
startups.</li><li>What the future holds for NatureMetrics and how their impact will continue to grow.</li></ul><p><br></p><p><strong>Quotes:</strong></p><p>“I couldn't focus too much on solving galaxy formation with the amount of bad news I was seeing in the climate space and biodiversity collapse. I made a transition – [to] looking for jobs to apply [my astrophysics skills to] related problems in climate and biodiversity.” — Noelia Jiménez Martínez</p><p><br></p><p>“Nature does not seem to behave [as well] as we would want. It might be that you have exactly the same covariates and your model is predicting species, and then you go, and it's not there.” — Noelia Jiménez Martínez</p><p><br></p><p>“[Most companies] will have to report on their sustainability strategies in the world to keep on functioning. In that context, what we can do here is make biodiversity monitoring achievable and democratically easy to access.” — Noelia Jiménez Martínez</p><p><br></p><p>“The success of [an AI startup is] – tied up to the diverse, strong teams you build.” — Noelia Jiménez Martínez</p><p><br></p><p><strong>Links:</strong></p><p><a href="https://www.naturemetrics.com/">NatureMetrics</a></p><p><a href="https://www.linkedin.com/in/dr-noelia-jim%C3%A9nez-mart%C3%ADnez/">Dr. Noelia Jiménez Martínez on LinkedIn</a></p><p><br><strong>Resources for Computer Vision Teams:</strong></p><p><a href="https://www.linkedin.com/in/hdcouture/">LinkedIn</a> – Connect with Heather.</p><p><a href="https://pixelscientia.com/newsletter/">Computer Vision Insights Newsletter</a> – A biweekly newsletter to help bring the latest machine learning and computer vision research to applications in people and planetary health.</p><p><a href="http://pixelscientia.com/services/strategy-session/">Computer Vision Strategy Session</a> – Not sure how to advance your computer vision project? Get unstuck with a clear set of next steps. 
Schedule a 1 hour strategy session now to advance your project.</p><p><a href="https://pixelscientia.com/services/foundation-model-assessment/">Foundation Model Assessment</a> – Foundation models are popping up everywhere – do you need one for your proprietary image dataset? Get a clear perspective on whether you can benefit from a domain-specific foundation model.</p>]]>
      </content:encoded>
      <pubDate>Mon, 01 Apr 2024 06:00:00 -0400</pubDate>
      <author>Heather D. Couture</author>
      <enclosure url="https://2.gum.fm/op3.dev/e/pdcn.co/e/pdst.fm/e/dts.podtrac.com/redirect.mp3/media.transistor.fm/d6530a16/9228eea0.mp3" length="34403551" type="audio/mpeg"/>
      <itunes:author>Heather D. Couture</itunes:author>
      <itunes:image href="https://img.transistor.fm/bc8erNxDrzw5wdMWSnRtDL_DLS9_Et2Av60TBIBD-t0/rs:fill:0:0:1/w:1400/h:1400/q:60/mb:500000/aHR0cHM6Ly9pbWct/dXBsb2FkLXByb2R1/Y3Rpb24udHJhbnNp/c3Rvci5mbS9lcGlz/b2RlLzE4MDAwMDAv/MTcxMDk3MTQ3Mi1h/cnR3b3JrLmpwZw.jpg"/>
      <itunes:duration>1428</itunes:duration>
      <itunes:summary>
        <![CDATA[<p>Biodiversity is not just an ecological concern. As you’ll learn in this episode, it has tangible economic implications too. Today on Impact AI, I'm joined by Dr. Noelia Jiménez Martínez, Head of Insights and Machine Learning at NatureMetrics, to talk about biodiversity monitoring. NatureMetrics is a global nature intelligence technology company providing end-to-end nature monitoring and impact reporting. Powered by eDNA, their Nature Intelligence Platform allows any company to manage its impacts and dependencies on biodiversity at scale, translating the complexities of nature into simple insights that help to inform the best decisions for both the planet and business. Tuning in, you’ll learn about the importance of NatureMetrics’ work, the role that machine learning plays in their technology, and some of the challenges that come with working with sometimes unpredictable data from nature. In my conversation with Noelia, we also touched on why biodiversity is an increasingly urgent imperative for businesses of all kinds, how NatureMetrics is democratizing biodiversity monitoring, and much more!</p><p><br></p><p><strong>Key Points:</strong></p><ul><li>Insight into Noelia's background in astrophysics and how it led her to NatureMetrics.</li><li>What NatureMetrics does, what eDNA is, and why it’s so important for sustainability.</li><li>The major role that machine learning plays in NatureMetrics' technology.</li><li>Specific examples of the types of models that NatureMetrics trains.</li><li>How <em>Jurassic Park</em> helps us understand what eDNA data looks like.</li><li>Different ways that this data is gathered depending on the relevant project.</li><li>Unique challenges of sampling for eDNA and training models based on those datasets.</li><li>How NatureMetrics measures the impact of its technology and makes biodiversity monitoring more accessible and achievable.</li><li>Noelia’s urgent and common sense advice for other leaders of AI-powered 
startups.</li><li>What the future holds for NatureMetrics and how their impact will continue to grow.</li></ul><p><br></p><p><strong>Quotes:</strong></p><p>“I couldn't focus too much on solving galaxy formation with the amount of bad news I was seeing in the climate space and biodiversity collapse. I made a transition – [to] looking for jobs to apply [my astrophysics skills to] related problems in climate and biodiversity.” — Noelia Jiménez Martínez</p><p><br></p><p>“Nature does not seem to behave [as well] as we would want. It might be that you have exactly the same covariates and your model is predicting species, and then you go, and it's not there.” — Noelia Jiménez Martínez</p><p><br></p><p>“[Most companies] will have to report on their sustainability strategies in the world to keep on functioning. In that context, what we can do here is make biodiversity monitoring achievable and democratically easy to access.” — Noelia Jiménez Martínez</p><p><br></p><p>“The success of [an AI startup is] – tied up to the diverse, strong teams you build.” — Noelia Jiménez Martínez</p><p><br></p><p><strong>Links:</strong></p><p><a href="https://www.naturemetrics.com/">NatureMetrics</a></p><p><a href="https://www.linkedin.com/in/dr-noelia-jim%C3%A9nez-mart%C3%ADnez/">Dr. Noelia Jiménez Martínez on LinkedIn</a></p><p><br><strong>Resources for Computer Vision Teams:</strong></p><p><a href="https://www.linkedin.com/in/hdcouture/">LinkedIn</a> – Connect with Heather.</p><p><a href="https://pixelscientia.com/newsletter/">Computer Vision Insights Newsletter</a> – A biweekly newsletter to help bring the latest machine learning and computer vision research to applications in people and planetary health.</p><p><a href="http://pixelscientia.com/services/strategy-session/">Computer Vision Strategy Session</a> – Not sure how to advance your computer vision project? Get unstuck with a clear set of next steps. 
Schedule a 1 hour strategy session now to advance your project.</p><p><a href="https://pixelscientia.com/services/foundation-model-assessment/">Foundation Model Assessment</a> – Foundation models are popping up everywhere – do you need one for your proprietary image dataset? Get a clear perspective on whether you can benefit from a domain-specific foundation model.</p>]]>
      </itunes:summary>
      <itunes:keywords>machine learning, deep learning, biodiversity, environmental DNA</itunes:keywords>
      <itunes:explicit>No</itunes:explicit>
      <podcast:transcript url="https://share.transistor.fm/s/d6530a16/transcript.txt" type="text/plain"/>
    </item>
    <item>
      <title>Generative AI for Life Sciences with Simon Arkell from Ryght</title>
      <itunes:episode>75</itunes:episode>
      <podcast:episode>75</podcast:episode>
      <itunes:title>Generative AI for Life Sciences with Simon Arkell from Ryght</itunes:title>
      <itunes:episodeType>full</itunes:episodeType>
      <guid isPermaLink="false">8b8f3810-1aa7-4471-927a-2544b084f229</guid>
      <link>https://pixelscientia.com/podcast/generative-ai-for-life-sciences-with-simon-arkell-from-ryght/</link>
      <description>
        <![CDATA[<p>In today’s episode, I am joined by Simon Arkell, the visionary CEO and co-founder of Ryght, to talk about copilots and the application of generative AI in life sciences. Ryght is dedicated to revolutionizing the field of life sciences through the power of AI. By leveraging cutting-edge technology and innovative solutions, Ryght aims to empower professionals and organizations within the life sciences industry to streamline processes, enhance productivity, and drive meaningful outcomes.</p><p>In our conversation, we discuss Simon's entrepreneurial background, the various companies he has founded, and what led him to create Ryght. We delve into the pivotal role of enterprise-scale, secure AI solutions in healthcare, and learn how Ryght's platform is reshaping the landscape of drug development and clinical research. Discover the intricate workings of generative AI copilots, the challenges of minimizing hallucinations and validating AI models, and why the utility of the approach at the enterprise level is essential. Simon also shares Ryght’s long-term goals and invaluable advice for leaders of AI startups. 
Join us, as we explore a world where healthcare and life sciences are transformed by cutting-edge technology with Simon Arkell from Ryght!</p><p><br></p><p><strong>Key Points:</strong></p><ul><li>Hear about Simon’s background and his path to founding Ryght.</li><li>Ryght’s generative AI approach, its potential in life sciences, and the role of copilots.</li><li>The importance of enterprise-scale, secure AI solutions in healthcare.</li><li>How generative AI copilots accelerate drug development processes.</li><li>Differences between training models for life sciences versus generic AI models.</li><li>Discover the challenges encountered in AI-powered solutions.</li><li>Explore the company’s approach to customer feedback and model validation.</li><li>Strategic considerations and advice for leaders of AI startups.</li><li>Ryght’s mission to transform the healthcare and life sciences industry.</li><li>Where to find more information about Ryght and connect with Simon.</li></ul><p><br></p><p><strong>Quotes:</strong></p><p>“We built an enterprise-secure version of Generative AI that has many different features that allow large companies and small companies to very securely benefit from Generative AI without all of the issues that a very insecure, non-industry-trained solution might create.” — Simon Arkell</p><p><br></p><p>“With this type of [generative AI] technology, you have the ability to completely unlock new formulas, and new molecules that could be life-changing.” — Simon Arkell</p><p><br></p><p>“Improving the utility of the platform comes down to the efficacy of the output. It comes down to the in-context learning, the ensembling, and the prompting. But at the end of the day, a human has to determine, in many cases, the accuracy and relevance of a specific answer.” — Simon Arkell</p><p><br></p><p>“It's not really about building models. 
It's about making sure that the right models are being utilized for the copilot.” — Simon Arkell</p><p><br></p><p><strong>Links:</strong></p><p><a href="https://www.linkedin.com/in/simonarkell/">Simon Arkell on LinkedIn</a></p><p><a href="https://www.ryght.ai">Ryght</a></p><p><a href="https://www.linkedin.com/company/thats-ryght/">Ryght on LinkedIn</a></p><p><a href="https://twitter.com/ryghtai">Ryght on X</a></p><p><a href="https://www.youtube.com/@RyghtAI">Ryght on YouTube</a></p><p><br></p><p><strong>Resources for Computer Vision Teams:</strong></p><p><a href="https://www.linkedin.com/in/hdcouture/">LinkedIn</a> – Connect with Heather.</p><p><a href="https://pixelscientia.com/newsletter/">Computer Vision Insights Newsletter</a> – A biweekly newsletter to help bring the latest machine learning and computer vision research to applications in people and planetary health.</p><p><a href="http://pixelscientia.com/services/strategy-session/">Computer Vision Strategy Session</a> – Not sure how to advance your computer vision project? Get unstuck with a clear set of next steps. Schedule a 1 hour strategy session now to advance your project.</p><p><a href="https://pixelscientia.com/services/foundation-model-assessment/">Foundation Model Assessment</a> – Foundation models are popping up everywhere – do you need one for your proprietary image dataset? Get a clear perspective on whether you can benefit from a domain-specific foundation model.</p>]]>
      </description>
      <content:encoded>
        <![CDATA[<p>In today’s episode, I am joined by Simon Arkell, the visionary CEO and co-founder of Ryght, to talk about copilots and the application of generative AI in life sciences. Ryght is dedicated to revolutionizing the field of life sciences through the power of AI. By leveraging cutting-edge technology and innovative solutions, Ryght aims to empower professionals and organizations within the life sciences industry to streamline processes, enhance productivity, and drive meaningful outcomes.</p><p>In our conversation, we discuss Simon's entrepreneurial background, the various companies he has founded, and what led him to create Ryght. We delve into the pivotal role of enterprise-scale, secure AI solutions in healthcare, and learn how Ryght's platform is reshaping the landscape of drug development and clinical research. Discover the intricate workings of generative AI copilots, the challenges of minimizing hallucinations and validating AI models, and why the utility of the approach at the enterprise level is essential. Simon also shares Ryght’s long-term goals and invaluable advice for leaders of AI startups. 
Join us, as we explore a world where healthcare and life sciences are transformed by cutting-edge technology with Simon Arkell from Ryght!</p><p><br></p><p><strong>Key Points:</strong></p><ul><li>Hear about Simon’s background and his path to founding Ryght.</li><li>Ryght’s generative AI approach, its potential in life sciences, and the role of copilots.</li><li>The importance of enterprise-scale, secure AI solutions in healthcare.</li><li>How generative AI copilots accelerate drug development processes.</li><li>Differences between training models for life sciences versus generic AI models.</li><li>Discover the challenges encountered in AI-powered solutions.</li><li>Explore the company’s approach to customer feedback and model validation.</li><li>Strategic considerations and advice for leaders of AI startups.</li><li>Ryght’s mission to transform the healthcare and life sciences industry.</li><li>Where to find more information about Ryght and connect with Simon.</li></ul><p><br></p><p><strong>Quotes:</strong></p><p>“We built an enterprise-secure version of Generative AI that has many different features that allow large companies and small companies to very securely benefit from Generative AI without all of the issues that a very insecure, non-industry-trained solution might create.” — Simon Arkell</p><p><br></p><p>“With this type of [generative AI] technology, you have the ability to completely unlock new formulas, and new molecules that could be life-changing.” — Simon Arkell</p><p><br></p><p>“Improving the utility of the platform comes down to the efficacy of the output. It comes down to the in-context learning, the ensembling, and the prompting. But at the end of the day, a human has to determine, in many cases, the accuracy and relevance of a specific answer.” — Simon Arkell</p><p><br></p><p>“It's not really about building models. 
It's about making sure that the right models are being utilized for the copilot.” — Simon Arkell</p><p><br></p><p><strong>Links:</strong></p><p><a href="https://www.linkedin.com/in/simonarkell/">Simon Arkell on LinkedIn</a></p><p><a href="https://www.ryght.ai">Ryght</a></p><p><a href="https://www.linkedin.com/company/thats-ryght/">Ryght on LinkedIn</a></p><p><a href="https://twitter.com/ryghtai">Ryght on X</a></p><p><a href="https://www.youtube.com/@RyghtAI">Ryght on YouTube</a></p><p><br></p><p><strong>Resources for Computer Vision Teams:</strong></p><p><a href="https://www.linkedin.com/in/hdcouture/">LinkedIn</a> – Connect with Heather.</p><p><a href="https://pixelscientia.com/newsletter/">Computer Vision Insights Newsletter</a> – A biweekly newsletter to help bring the latest machine learning and computer vision research to applications in people and planetary health.</p><p><a href="http://pixelscientia.com/services/strategy-session/">Computer Vision Strategy Session</a> – Not sure how to advance your computer vision project? Get unstuck with a clear set of next steps. Schedule a 1 hour strategy session now to advance your project.</p><p><a href="https://pixelscientia.com/services/foundation-model-assessment/">Foundation Model Assessment</a> – Foundation models are popping up everywhere – do you need one for your proprietary image dataset? Get a clear perspective on whether you can benefit from a domain-specific foundation model.</p>]]>
      </content:encoded>
      <pubDate>Mon, 25 Mar 2024 06:00:00 -0400</pubDate>
      <author>Heather D. Couture</author>
      <enclosure url="https://2.gum.fm/op3.dev/e/pdcn.co/e/pdst.fm/e/dts.podtrac.com/redirect.mp3/media.transistor.fm/5bc1e234/79a39b9a.mp3" length="28910728" type="audio/mpeg"/>
      <itunes:author>Heather D. Couture</itunes:author>
      <itunes:image href="https://img.transistor.fm/RvcepBpb4ovn4YaXmr5j5EoFwTOft6lI4qr0Bc8yNfc/rs:fill:0:0:1/w:1400/h:1400/q:60/mb:500000/aHR0cHM6Ly9pbWct/dXBsb2FkLXByb2R1/Y3Rpb24udHJhbnNp/c3Rvci5mbS9lcGlz/b2RlLzE3OTM2NzEv/MTcxMDY5MTMyMi1h/cnR3b3JrLmpwZw.jpg"/>
      <itunes:duration>1800</itunes:duration>
      <itunes:summary>
        <![CDATA[<p>In today’s episode, I am joined by Simon Arkell, the visionary CEO and co-founder of Ryght, to talk about copilots and the application of generative AI in life sciences. Ryght is dedicated to revolutionizing the field of life sciences through the power of AI. By leveraging cutting-edge technology and innovative solutions, Ryght aims to empower professionals and organizations within the life sciences industry to streamline processes, enhance productivity, and drive meaningful outcomes.</p><p>In our conversation, we discuss Simon's entrepreneurial background, the various companies he has founded, and what led him to create Ryght. We delve into the pivotal role of enterprise-scale, secure AI solutions in healthcare, and learn how Ryght's platform is reshaping the landscape of drug development and clinical research. Discover the intricate workings of generative AI copilots, the challenges of minimizing hallucinations and validating AI models, and why the utility of the approach at the enterprise level is essential. Simon also shares Ryght’s long-term goals and invaluable advice for leaders of AI startups. 
Join us, as we explore a world where healthcare and life sciences are transformed by cutting-edge technology with Simon Arkell from Ryght!</p><p><br></p><p><strong>Key Points:</strong></p><ul><li>Hear about Simon’s background and his path to founding Ryght.</li><li>Ryght’s generative AI approach, its potential in life sciences, and the role of copilots.</li><li>The importance of enterprise-scale, secure AI solutions in healthcare.</li><li>How generative AI copilots accelerate drug development processes.</li><li>Differences between training models for life sciences versus generic AI models.</li><li>Discover the challenges encountered in AI-powered solutions.</li><li>Explore the company’s approach to customer feedback and model validation.</li><li>Strategic considerations and advice for leaders of AI startups.</li><li>Ryght’s mission to transform the healthcare and life sciences industry.</li><li>Where to find more information about Ryght and connect with Simon.</li></ul><p><br></p><p><strong>Quotes:</strong></p><p>“We built an enterprise-secure version of Generative AI that has many different features that allow large companies and small companies to very securely benefit from Generative AI without all of the issues that a very insecure, non-industry-trained solution might create.” — Simon Arkell</p><p><br></p><p>“With this type of [generative AI] technology, you have the ability to completely unlock new formulas, and new molecules that could be life-changing.” — Simon Arkell</p><p><br></p><p>“Improving the utility of the platform comes down to the efficacy of the output. It comes down to the in-context learning, the ensembling, and the prompting. But at the end of the day, a human has to determine, in many cases, the accuracy and relevance of a specific answer.” — Simon Arkell</p><p><br></p><p>“It's not really about building models. 
It's about making sure that the right models are being utilized for the copilot.” — Simon Arkell</p><p><br></p><p><strong>Links:</strong></p><p><a href="https://www.linkedin.com/in/simonarkell/">Simon Arkell on LinkedIn</a></p><p><a href="https://www.ryght.ai">Ryght</a></p><p><a href="https://www.linkedin.com/company/thats-ryght/">Ryght on LinkedIn</a></p><p><a href="https://twitter.com/ryghtai">Ryght on X</a></p><p><a href="https://www.youtube.com/@RyghtAI">Ryght on YouTube</a></p><p><br></p><p><strong>Resources for Computer Vision Teams:</strong></p><p><a href="https://www.linkedin.com/in/hdcouture/">LinkedIn</a> – Connect with Heather.</p><p><a href="https://pixelscientia.com/newsletter/">Computer Vision Insights Newsletter</a> – A biweekly newsletter to help bring the latest machine learning and computer vision research to applications in people and planetary health.</p><p><a href="http://pixelscientia.com/services/strategy-session/">Computer Vision Strategy Session</a> – Not sure how to advance your computer vision project? Get unstuck with a clear set of next steps. Schedule a 1 hour strategy session now to advance your project.</p><p><a href="https://pixelscientia.com/services/foundation-model-assessment/">Foundation Model Assessment</a> – Foundation models are popping up everywhere – do you need one for your proprietary image dataset? Get a clear perspective on whether you can benefit from a domain-specific foundation model.</p>]]>
      </itunes:summary>
      <itunes:keywords>machine learning, life sciences, copilot, genAI</itunes:keywords>
      <itunes:explicit>No</itunes:explicit>
      <podcast:transcript url="https://share.transistor.fm/s/5bc1e234/transcript.txt" type="text/plain"/>
    </item>
    <item>
      <title>Enabling Early Disease Detection with Sean Cassidy from Lucem Health</title>
      <itunes:episode>74</itunes:episode>
      <podcast:episode>74</podcast:episode>
      <itunes:title>Enabling Early Disease Detection with Sean Cassidy from Lucem Health</itunes:title>
      <itunes:episodeType>full</itunes:episodeType>
      <guid isPermaLink="false">66e90d48-7547-424d-aa88-946a3e615851</guid>
      <link>https://pixelscientia.com/podcast/enabling-early-disease-detection-with-sean-cassidy-from-lucem-health/</link>
      <description>
        <![CDATA[<p>AI in healthcare is one of the most researched areas today, particularly on the clinical side of healthcare. Sean Cassidy is the Co-Founder and CEO of Lucem Health. Having worked in digital health for the last twenty years, he joins me today to talk about identifying chronic diseases. Tune in to hear how AI and machine learning are creating efficiencies for different forms of healthcare data, and how changes and challenges are being addressed to improve the process. Going beyond workflow support, we discuss considerations to bear in mind when integrating AI into healthcare systems and how to meaningfully measure efficacy in a clinical context. Sean shares some hard-earned wisdom about leading an AI startup, reveals his big vision for the future of Lucem Health, and much more.</p><p><br></p><p><strong>Key Points:</strong></p><ul><li>Introducing guest Sean Cassidy, who co-founded Lucem Health. </li><li>Defining digital health through an overview of Sean’s history in this industry. </li><li>The founding idea behind Lucem Health. </li><li>Different forms of healthcare data and how AI and machine learning can support them. </li><li>Navigating changes in external variables and patient circumstances.</li><li>The downstream diagnosis process and why patients are rarely re-assessed.</li><li>How Lucem Health’s approach facilitates doctors as they continue as they always have. </li><li>Considerations to bear in mind with the clinical adoption of AI beyond workflow.</li><li>How efficacy is measured in a clinical context.</li><li>Advice for leaders in AI startups.</li><li>A vision for the future of Lucem Health. 
</li></ul><p><br></p><p><strong>Quotes:</strong></p><p>“We are focused on early disease detection almost exclusively, and so that is using AI and machine learning algorithms to, at any point in time, evaluate the risk that a patient may have a certain disease.” — Sean Cassidy</p><p><br></p><p>“Workflow is really important, but there are also other considerations that matter in terms of AI being more widely adopted in clinical settings and healthcare.” — Sean Cassidy</p><p><br></p><p>“We are always evaluating and trying to get a deep understanding of whether what we said was going to happen with respect to the performance of the solution is actually manifesting itself in the real world.” — Sean Cassidy</p><p><br></p><p><strong>Links:</strong></p><p><a href="https://www.linkedin.com/in/sean-cassidy-5b6791/">Sean Cassidy on LinkedIn</a></p><p><a href="https://lucemhealth.com/">Lucem Health</a></p><p><br><strong>Resources for Computer Vision Teams:</strong></p><p><a href="https://www.linkedin.com/in/hdcouture/">LinkedIn</a> – Connect with Heather.</p><p><a href="https://pixelscientia.com/newsletter/">Computer Vision Insights Newsletter</a> – A biweekly newsletter to help bring the latest machine learning and computer vision research to applications in people and planetary health.</p><p><a href="http://pixelscientia.com/services/strategy-session/">Computer Vision Strategy Session</a> – Not sure how to advance your computer vision project? Get unstuck with a clear set of next steps. Schedule a 1 hour strategy session now to advance your project.</p><p><a href="https://pixelscientia.com/services/foundation-model-assessment/">Foundation Model Assessment</a> – Foundation models are popping up everywhere – do you need one for your proprietary image dataset? Get a clear perspective on whether you can benefit from a domain-specific foundation model.</p>]]>
      </description>
      <content:encoded>
        <![CDATA[<p>AI in healthcare is one of the most researched areas today, particularly on the clinical side of healthcare. Sean Cassidy is the Co-Founder and CEO of Lucem Health. Having worked in digital health for the last twenty years, he joins me today to talk about identifying chronic diseases. Tune in to hear how AI and machine learning are creating efficiencies for different forms of healthcare data, and how changes and challenges are being addressed to improve the process. Going beyond workflow support, we discuss considerations to bear in mind when integrating AI into healthcare systems and how to meaningfully measure efficacy in a clinical context. Sean shares some hard-earned wisdom about leading an AI startup, reveals his big vision for the future of Lucem Health, and much more.</p><p><br></p><p><strong>Key Points:</strong></p><ul><li>Introducing guest Sean Cassidy, who co-founded Lucem Health. </li><li>Defining digital health through an overview of Sean’s history in this industry. </li><li>The founding idea behind Lucem Health. </li><li>Different forms of healthcare data and how AI and machine learning can support them. </li><li>Navigating changes in external variables and patient circumstances.</li><li>The downstream diagnosis process and why patients are rarely re-assessed.</li><li>How Lucem Health’s approach facilitates doctors as they continue as they always have. </li><li>Considerations to bear in mind with the clinical adoption of AI beyond workflow.</li><li>How efficacy is measured in a clinical context.</li><li>Advice for leaders in AI startups.</li><li>A vision for the future of Lucem Health. 
</li></ul><p><br></p><p><strong>Quotes:</strong></p><p>“We are focused on early disease detection almost exclusively, and so that is using AI and machine learning algorithms to, at any point in time, evaluate the risk that a patient may have a certain disease.” — Sean Cassidy</p><p><br></p><p>“Workflow is really important, but there are also other considerations that matter in terms of AI being more widely adopted in clinical settings and healthcare.” — Sean Cassidy</p><p><br></p><p>“We are always evaluating and trying to get a deep understanding of whether what we said was going to happen with respect to the performance of the solution is actually manifesting itself in the real world.” — Sean Cassidy</p><p><br></p><p><strong>Links:</strong></p><p><a href="https://www.linkedin.com/in/sean-cassidy-5b6791/">Sean Cassidy on LinkedIn</a></p><p><a href="https://lucemhealth.com/">Lucem Health</a></p><p><br><strong>Resources for Computer Vision Teams:</strong></p><p><a href="https://www.linkedin.com/in/hdcouture/">LinkedIn</a> – Connect with Heather.</p><p><a href="https://pixelscientia.com/newsletter/">Computer Vision Insights Newsletter</a> – A biweekly newsletter to help bring the latest machine learning and computer vision research to applications in people and planetary health.</p><p><a href="http://pixelscientia.com/services/strategy-session/">Computer Vision Strategy Session</a> – Not sure how to advance your computer vision project? Get unstuck with a clear set of next steps. Schedule a 1 hour strategy session now to advance your project.</p><p><a href="https://pixelscientia.com/services/foundation-model-assessment/">Foundation Model Assessment</a> – Foundation models are popping up everywhere – do you need one for your proprietary image dataset? Get a clear perspective on whether you can benefit from a domain-specific foundation model.</p>]]>
      </content:encoded>
      <pubDate>Mon, 18 Mar 2024 06:00:00 -0400</pubDate>
      <author>Heather D. Couture</author>
      <enclosure url="https://2.gum.fm/op3.dev/e/pdcn.co/e/pdst.fm/e/dts.podtrac.com/redirect.mp3/media.transistor.fm/301bd41d/ce986515.mp3" length="25747656" type="audio/mpeg"/>
      <itunes:author>Heather D. Couture</itunes:author>
      <itunes:image href="https://img.transistor.fm/uCXin5zbNZ4R-E9fOq7UC2KAU0PEbuBLsTHcjTYmXeo/rs:fill:0:0:1/w:1400/h:1400/q:60/mb:500000/aHR0cHM6Ly9pbWct/dXBsb2FkLXByb2R1/Y3Rpb24udHJhbnNp/c3Rvci5mbS9lcGlz/b2RlLzE3NDAzMzUv/MTcwODExMzExMy1h/cnR3b3JrLmpwZw.jpg"/>
      <itunes:duration>1070</itunes:duration>
      <itunes:summary>
        <![CDATA[<p>AI in healthcare is one of the most researched areas today, particularly on the clinical side of healthcare. Sean Cassidy is the Co-Founder and CEO of Lucem Health. Having worked in digital health for the last twenty years, he joins me today to talk about identifying chronic diseases. Tune in to hear how AI and machine learning are creating efficiencies for different forms of healthcare data, and how changes and challenges are being addressed to improve the process. Going beyond workflow support, we discuss considerations to bear in mind when integrating AI into healthcare systems and how to meaningfully measure efficacy in a clinical context. Sean shares some hard-earned wisdom about leading an AI startup, reveals his big vision for the future of Lucem Health, and much more.</p><p><br></p><p><strong>Key Points:</strong></p><ul><li>Introducing guest Sean Cassidy, who co-founded Lucem Health. </li><li>Defining digital health through an overview of Sean’s history in this industry. </li><li>The founding idea behind Lucem Health. </li><li>Different forms of healthcare data and how AI and machine learning can support them. </li><li>Navigating changes in external variables and patient circumstances.</li><li>The downstream diagnosis process and why patients are rarely re-assessed.</li><li>How Lucem Health’s approach facilitates doctors as they continue as they always have. </li><li>Considerations to bear in mind with the clinical adoption of AI beyond workflow.</li><li>How efficacy is measured in a clinical context.</li><li>Advice for leaders in AI startups.</li><li>A vision for the future of Lucem Health. 
</li></ul><p><br></p><p><strong>Quotes:</strong></p><p>“We are focused on early disease detection almost exclusively, and so that is using AI and machine learning algorithms to, at any point in time, evaluate the risk that a patient may have a certain disease.” — Sean Cassidy</p><p><br></p><p>“Workflow is really important, but there are also other considerations that matter in terms of AI being more widely adopted in clinical settings and healthcare.” — Sean Cassidy</p><p><br></p><p>“We are always evaluating and trying to get a deep understanding of whether what we said was going to happen with respect to the performance of the solution is actually manifesting itself in the real world.” — Sean Cassidy</p><p><br></p><p><strong>Links:</strong></p><p><a href="https://www.linkedin.com/in/sean-cassidy-5b6791/">Sean Cassidy on LinkedIn</a></p><p><a href="https://lucemhealth.com/">Lucem Health</a></p><p><br><strong>Resources for Computer Vision Teams:</strong></p><p><a href="https://www.linkedin.com/in/hdcouture/">LinkedIn</a> – Connect with Heather.</p><p><a href="https://pixelscientia.com/newsletter/">Computer Vision Insights Newsletter</a> – A biweekly newsletter to help bring the latest machine learning and computer vision research to applications in people and planetary health.</p><p><a href="http://pixelscientia.com/services/strategy-session/">Computer Vision Strategy Session</a> – Not sure how to advance your computer vision project? Get unstuck with a clear set of next steps. Schedule a 1 hour strategy session now to advance your project.</p><p><a href="https://pixelscientia.com/services/foundation-model-assessment/">Foundation Model Assessment</a> – Foundation models are popping up everywhere – do you need one for your proprietary image dataset? Get a clear perspective on whether you can benefit from a domain-specific foundation model.</p>]]>
      </itunes:summary>
      <itunes:keywords>machine learning, deep learning, healthcare</itunes:keywords>
      <itunes:explicit>No</itunes:explicit>
      <podcast:transcript url="https://share.transistor.fm/s/301bd41d/transcript.txt" type="text/plain"/>
    </item>
    <item>
      <title>Self-Supervised Learning for Histopathology with Jean-Baptiste Schiratti from Owkin</title>
      <itunes:episode>73</itunes:episode>
      <podcast:episode>73</podcast:episode>
      <itunes:title>Self-Supervised Learning for Histopathology with Jean-Baptiste Schiratti from Owkin</itunes:title>
      <itunes:episodeType>full</itunes:episodeType>
      <guid isPermaLink="false">c95a28a1-423c-4c7d-bba0-23b7a0b7d18b</guid>
      <link>https://pixelscientia.com/podcast/self-supervised-learning-for-histopathology-with-jean-baptiste-schiratti-from-owkin/</link>
      <description>
        <![CDATA[<p>In this episode, I sit down with Jean-Baptiste Schiratti, Medical Imaging Group Lead and Lead Research Scientist at Owkin, to discuss the application of self-supervised learning in drug development and diagnostics. Owkin is a groundbreaking AI biotechnology company revolutionizing the field of medical research and treatment. It aims to bridge the gap between complex biological understanding and the development of innovative treatments. In our conversation, we discuss his background, Owkin's mission, and the importance of AI in healthcare. We delve into self-supervised learning, its benefits, and its application in pathology. Gain insights into the significance of data diversity and computational resources in training self-supervised models and the development of multimodal foundation models. He also shares the impact Owkin aims to achieve in the coming years and the next hurdle for self-supervised learning.</p><p><br></p><p><strong>Key Points:</strong></p><ul><li>Introducing Jean-Baptiste Schiratti, his background, and path to Owkin.</li><li>Details about Owkin, its mission, and why its work is significant.</li><li>The application of self-supervised learning in drug development and diagnostics.</li><li>Examples of the different applications of self-supervised learning.</li><li>Discover the process behind training self-supervised models for pathology.</li><li>Explore the various benefits of using self-supervised learning.</li><li>His approach for structuring the data used for self-supervised learning.</li><li>Unpack the potential impact of self-supervised AI models on pathology.</li><li>Gain insights into the next frontier of foundation model development.</li><li>He shares his hopes for the future impact of Owkin.</li></ul><p><br></p><p><strong>Quotes:</strong></p><p>“To be able to train efficiently, computer vision backbones, you actually need to have a lot of compute and that can be very costly.” — Jean-Baptiste 
Schiratti</p><p><br></p><p>“There are some models that are indeed particular to specific types of tissue or specific sub-types of cancers and also the models can have different architectures and different sizes, they come in different flavors.” — Jean-Baptiste Schiratti</p><p><br></p><p>“The more diverse the [training] data is, the better.” — Jean-Baptiste Schiratti</p><p><br></p><p>“I’m convinced that the foundation models will play a very important role in digital pathology and I think this is already happening.” — Jean-Baptiste Schiratti</p><p><br></p><p><strong>Links:</strong></p><p><a href="https://www.linkedin.com/in/jbschiratti/">Jean-Baptiste Schiratti on LinkedIn</a></p><p><a href="https://twitter.com/jbschiratti">Jean-Baptiste Schiratti on X</a></p><p><a href="https://www.owkin.com">Owkin</a></p><p><a href="https://huggingface.co/owkin/phikon">Phikon</a></p><p><br><strong>Resources for Computer Vision Teams:</strong></p><p><a href="https://www.linkedin.com/in/hdcouture/">LinkedIn</a> – Connect with Heather.</p><p><a href="https://pixelscientia.com/newsletter/">Computer Vision Insights Newsletter</a> – A biweekly newsletter to help bring the latest machine learning and computer vision research to applications in people and planetary health.</p><p><a href="http://pixelscientia.com/services/strategy-session/">Computer Vision Strategy Session</a> – Not sure how to advance your computer vision project? Get unstuck with a clear set of next steps. Schedule a 1 hour strategy session now to advance your project.</p><p><a href="https://pixelscientia.com/services/foundation-model-assessment/">Foundation Model Assessment</a> – Foundation models are popping up everywhere – do you need one for your proprietary image dataset? Get a clear perspective on whether you can benefit from a domain-specific foundation model.</p>]]>
      </description>
      <content:encoded>
        <![CDATA[<p>In this episode, I sit down with Jean-Baptiste Schiratti, Medical Imaging Group Lead and Lead Research Scientist at Owkin, to discuss the application of self-supervised learning in drug development and diagnostics. Owkin is a groundbreaking AI biotechnology company revolutionizing the field of medical research and treatment. It aims to bridge the gap between complex biological understanding and the development of innovative treatments. In our conversation, we discuss his background, Owkin's mission, and the importance of AI in healthcare. We delve into self-supervised learning, its benefits, and its application in pathology. Gain insights into the significance of data diversity and computational resources in training self-supervised models and the development of multimodal foundation models. He also shares the impact Owkin aims to achieve in the coming years and the next hurdle for self-supervised learning.</p><p><br></p><p><strong>Key Points:</strong></p><ul><li>Introducing Jean-Baptiste Schiratti, his background, and path to Owkin.</li><li>Details about Owkin, its mission, and why its work is significant.</li><li>The application of self-supervised learning in drug development and diagnostics.</li><li>Examples of the different applications of self-supervised learning.</li><li>Discover the process behind training self-supervised models for pathology.</li><li>Explore the various benefits of using self-supervised learning.</li><li>His approach for structuring the data used for self-supervised learning.</li><li>Unpack the potential impact of self-supervised AI models on pathology.</li><li>Gain insights into the next frontier of foundation model development.</li><li>He shares his hopes for the future impact of Owkin.</li></ul><p><br></p><p><strong>Quotes:</strong></p><p>“To be able to train efficiently, computer vision backbones, you actually need to have a lot of compute and that can be very costly.” — Jean-Baptiste 
Schiratti</p><p><br></p><p>“There are some models that are indeed particular to specific types of tissue or specific sub-types of cancers and also the models can have different architectures and different sizes, they come in different flavors.” — Jean-Baptiste Schiratti</p><p><br></p><p>“The more diverse the [training] data is, the better.” — Jean-Baptiste Schiratti</p><p><br></p><p>“I’m convinced that the foundation models will play a very important role in digital pathology and I think this is already happening.” — Jean-Baptiste Schiratti</p><p><br></p><p><strong>Links:</strong></p><p><a href="https://www.linkedin.com/in/jbschiratti/">Jean-Baptiste Schiratti on LinkedIn</a></p><p><a href="https://twitter.com/jbschiratti">Jean-Baptiste Schiratti on X</a></p><p><a href="https://www.owkin.com">Owkin</a></p><p><a href="https://huggingface.co/owkin/phikon">Phikon</a></p><p><br><strong>Resources for Computer Vision Teams:</strong></p><p><a href="https://www.linkedin.com/in/hdcouture/">LinkedIn</a> – Connect with Heather.</p><p><a href="https://pixelscientia.com/newsletter/">Computer Vision Insights Newsletter</a> – A biweekly newsletter to help bring the latest machine learning and computer vision research to applications in people and planetary health.</p><p><a href="http://pixelscientia.com/services/strategy-session/">Computer Vision Strategy Session</a> – Not sure how to advance your computer vision project? Get unstuck with a clear set of next steps. Schedule a 1 hour strategy session now to advance your project.</p><p><a href="https://pixelscientia.com/services/foundation-model-assessment/">Foundation Model Assessment</a> – Foundation models are popping up everywhere – do you need one for your proprietary image dataset? Get a clear perspective on whether you can benefit from a domain-specific foundation model.</p>]]>
      </content:encoded>
      <pubDate>Mon, 11 Mar 2024 06:00:00 -0400</pubDate>
      <author>heather@pixelscientia.com (Heather D. Couture)</author>
      <enclosure url="https://2.gum.fm/op3.dev/e/pdcn.co/e/pdst.fm/e/dts.podtrac.com/redirect.mp3/media.transistor.fm/2b997c9b/698e2a71.mp3" length="24290367" type="audio/mpeg"/>
      <itunes:author>Heather D. Couture</itunes:author>
      <itunes:image href="https://img.transistor.fm/HfB06ptXmMf3XeVHQ3oKPM80NB-qe5vnXwmemLlgbCk/rs:fill:0:0:1/w:1400/h:1400/q:60/mb:500000/aHR0cHM6Ly9pbWct/dXBsb2FkLXByb2R1/Y3Rpb24udHJhbnNp/c3Rvci5mbS9lcGlz/b2RlLzE3NDAzMzIv/MTcwODExMjk4OC1h/cnR3b3JrLmpwZw.jpg"/>
      <itunes:duration>1008</itunes:duration>
      <itunes:summary>
        <![CDATA[<p>In this episode, I sit down with Jean-Baptiste Schiratti, Medical Imaging Group Lead and Lead Research Scientist at Owkin, to discuss the application of self-supervised learning in drug development and diagnostics. Owkin is a groundbreaking AI biotechnology company revolutionizing the field of medical research and treatment. It aims to bridge the gap between complex biological understanding and the development of innovative treatments. In our conversation, we discuss his background, Owkin's mission, and the importance of AI in healthcare. We delve into self-supervised learning, its benefits, and its application in pathology. Gain insights into the significance of data diversity and computational resources in training self-supervised models and the development of multimodal foundation models. He also shares the impact Owkin aims to achieve in the coming years and the next hurdle for self-supervised learning.</p><p><br></p><p><strong>Key Points:</strong></p><ul><li>Introducing Jean-Baptiste Schiratti, his background, and path to Owkin.</li><li>Details about Owkin, its mission, and why its work is significant.</li><li>The application of self-supervised learning in drug development and diagnostics.</li><li>Examples of the different applications of self-supervised learning.</li><li>Discover the process behind training self-supervised models for pathology.</li><li>Explore the various benefits of using self-supervised learning.</li><li>His approach for structuring the data used for self-supervised learning.</li><li>Unpack the potential impact of self-supervised AI models on pathology.</li><li>Gain insights into the next frontier of foundation model development.</li><li>He shares his hopes for the future impact of Owkin.</li></ul><p><br></p><p><strong>Quotes:</strong></p><p>“To be able to train efficiently, computer vision backbones, you actually need to have a lot of compute and that can be very costly.” — Jean-Baptiste 
Schiratti</p><p><br></p><p>“There are some models that are indeed particular to specific types of tissue or specific sub-types of cancers and also the models can have different architectures and different sizes, they come in different flavors.” — Jean-Baptiste Schiratti</p><p><br></p><p>“The more diverse the [training] data is, the better.” — Jean-Baptiste Schiratti</p><p><br></p><p>“I’m convinced that the foundation models will play a very important role in digital pathology and I think this is already happening.” — Jean-Baptiste Schiratti</p><p><br></p><p><strong>Links:</strong></p><p><a href="https://www.linkedin.com/in/jbschiratti/">Jean-Baptiste Schiratti on LinkedIn</a></p><p><a href="https://twitter.com/jbschiratti">Jean-Baptiste Schiratti on X</a></p><p><a href="https://www.owkin.com">Owkin</a></p><p><a href="https://huggingface.co/owkin/phikon">Phikon</a></p><p><br><strong>Resources for Computer Vision Teams:</strong></p><p><a href="https://www.linkedin.com/in/hdcouture/">LinkedIn</a> – Connect with Heather.</p><p><a href="https://pixelscientia.com/newsletter/">Computer Vision Insights Newsletter</a> – A biweekly newsletter to help bring the latest machine learning and computer vision research to applications in people and planetary health.</p><p><a href="http://pixelscientia.com/services/strategy-session/">Computer Vision Strategy Session</a> – Not sure how to advance your computer vision project? Get unstuck with a clear set of next steps. Schedule a 1 hour strategy session now to advance your project.</p><p><a href="https://pixelscientia.com/services/foundation-model-assessment/">Foundation Model Assessment</a> – Foundation models are popping up everywhere – do you need one for your proprietary image dataset? Get a clear perspective on whether you can benefit from a domain-specific foundation model.</p>]]>
      </itunes:summary>
      <itunes:keywords>machine learning, computational pathology, healthcare, medical imaging</itunes:keywords>
      <itunes:explicit>No</itunes:explicit>
      <podcast:transcript url="https://share.transistor.fm/s/2b997c9b/transcript.txt" type="text/plain"/>
    </item>
    <item>
      <title>Mental Health Screening with Linda Chung and Michael Mullarkey from Aiberry</title>
      <itunes:episode>72</itunes:episode>
      <podcast:episode>72</podcast:episode>
      <itunes:title>Mental Health Screening with Linda Chung and Michael Mullarkey from Aiberry</itunes:title>
      <itunes:episodeType>full</itunes:episodeType>
      <guid isPermaLink="false">db8a27db-7b94-433e-a1bb-e7bea366c4a4</guid>
      <link>https://pixelscientia.com/podcast/mental-health-screening-with-linda-chung-and-michael-mullarkey-from-aiberry/</link>
      <description>
        <![CDATA[<p>Joining me today are Linda Chung and Michael Mullarkey to discuss the transformative potential of AI in mental health care. Linda is the co-CEO and Co-Founder of Aiberry, a groundbreaking AI company redefining mental healthcare accessibility. With a background in speech-language pathology, Linda pioneered telehealth services and now leads Aiberry in leveraging innovative technology for objective mental health screenings. Michael, the Senior Clinical Data Scientist at Aiberry, is dedicated to translating complex data science into tangible human value. His unique background in clinical psychology merged with a passion for coding drives his mission to address pressing human concerns through data.</p><p>In our conversation, we explore the fascinating intersection of clinical expertise and artificial intelligence, unlocking personalized insights and proactive strategies for mental well-being. Hear about Aiberry’s innovative chatbot “Botberry” and how it helps provide insights into the user’s mental health. We also get into the weeds and unpack how Aiberry develops its models, data source challenges, the value of custom models, mitigating model biases, and much more! Our guests also provide invaluable advice for other startups and share their future vision for the company. 
Tune in and discover AI technology at the forefront of mental health innovation with Linda Chung and Michael Mullarkey from Aiberry!</p><p><br></p><p><strong>Key Points:</strong></p><ul><li>Linda’s healthcare background and motivation for starting Aiberry.</li><li>Michael's transition from clinical psychology to AI at Aiberry.</li><li>Aiberry's AI-powered mental health assessment platform and its unique approach.</li><li>The role of machine learning in Aiberry's technology.</li><li>Model development, data collection challenges, and custom model creation.</li><li>Addressing bias in models trained on patient interview data.</li><li>Measuring impact and success metrics at Aiberry.</li><li>Advice for leaders of AI-powered startups.</li><li>The vision for Aiberry's impact in the next three to five years.</li></ul><p><br></p><p><strong>Quotes:</strong></p><p>“We know that early detection leads to early intervention and better outcomes.” — Linda Chung </p><p><br></p><p>“Our models take the messy, natural human way that people talk about their mental health, and we turn it into systematic data that are necessary for the healthcare industry and report it back to the user.” — Linda Chung</p><p><br></p><p>“As a health tech company, we have to take the health and the tech elements of our business equally seriously. 
So, one of our guiding principles from a health perspective is we have to keep people's data secure.” — Michael Mullarkey</p><p><br></p><p>“We really value letting people talk about their mental health in their own words, and that can lead to some unexpected outcomes on the modeling side of the operation.” — Michael Mullarkey</p><p><br></p><p><strong>Links:</strong></p><p><a href="https://www.linkedin.com/in/linda-chung-905733146/">Linda Chung on LinkedIn</a></p><p><a href="https://www.linkedin.com/in/mcmullarkey/">Michael Mullarkey on LinkedIn</a></p><p><a href="https://aiberry.io">Aiberry</a></p><p><br><strong>Resources for Computer Vision Teams:</strong></p><p><a href="https://www.linkedin.com/in/hdcouture/">LinkedIn</a> – Connect with Heather.</p><p><a href="https://pixelscientia.com/newsletter/">Computer Vision Insights Newsletter</a> – A biweekly newsletter to help bring the latest machine learning and computer vision research to applications in people and planetary health.</p><p><a href="http://pixelscientia.com/services/strategy-session/">Computer Vision Strategy Session</a> – Not sure how to advance your computer vision project? Get unstuck with a clear set of next steps. Schedule a 1 hour strategy session now to advance your project.</p><p><a href="https://pixelscientia.com/services/foundation-model-assessment/">Foundation Model Assessment</a> – Foundation models are popping up everywhere – do you need one for your proprietary image dataset? Get a clear perspective on whether you can benefit from a domain-specific foundation model.</p>]]>
      </description>
      <content:encoded>
        <![CDATA[<p>Joining me today are Linda Chung and Michael Mullarkey to discuss the transformative potential of AI in mental health care. Linda is the co-CEO and Co-Founder of Aiberry, a groundbreaking AI company redefining mental healthcare accessibility. With a background in speech-language pathology, Linda pioneered telehealth services and now leads Aiberry in leveraging innovative technology for objective mental health screenings. Michael, the Senior Clinical Data Scientist at Aiberry, is dedicated to translating complex data science into tangible human value. His unique background in clinical psychology merged with a passion for coding drives his mission to address pressing human concerns through data.</p><p>In our conversation, we explore the fascinating intersection of clinical expertise and artificial intelligence, unlocking personalized insights and proactive strategies for mental well-being. Hear about Aiberry’s innovative chatbot “Botberry” and how it helps provide insights into the user’s mental health. We also get into the weeds and unpack how Aiberry develops its models, data source challenges, the value of custom models, mitigating model biases, and much more! Our guests also provide invaluable advice for other startups and share their future vision for the company. 
Tune in and discover AI technology at the forefront of mental health innovation with Linda Chung and Michael Mullarkey from Aiberry!</p><p><br></p><p><strong>Key Points:</strong></p><ul><li>Linda’s healthcare background and motivation for starting Aiberry.</li><li>Michael's transition from clinical psychology to AI at Aiberry.</li><li>Aiberry's AI-powered mental health assessment platform and its unique approach.</li><li>The role of machine learning in Aiberry's technology.</li><li>Model development, data collection challenges, and custom model creation.</li><li>Addressing bias in models trained on patient interview data.</li><li>Measuring impact and success metrics at Aiberry.</li><li>Advice for leaders of AI-powered startups.</li><li>The vision for Aiberry's impact in the next three to five years.</li></ul><p><br></p><p><strong>Quotes:</strong></p><p>“We know that early detection leads to early intervention and better outcomes.” — Linda Chung </p><p><br></p><p>“Our models take the messy, natural human way that people talk about their mental health, and we turn it into systematic data that are necessary for the healthcare industry and report it back to the user.” — Linda Chung</p><p><br></p><p>“As a health tech company, we have to take the health and the tech elements of our business equally seriously. 
So, one of our guiding principles from a health perspective is we have to keep people's data secure.” — Michael Mullarkey</p><p><br></p><p>“We really value letting people talk about their mental health in their own words, and that can lead to some unexpected outcomes on the modeling side of the operation.” — Michael Mullarkey</p><p><br></p><p><strong>Links:</strong></p><p><a href="https://www.linkedin.com/in/linda-chung-905733146/">Linda Chung on LinkedIn</a></p><p><a href="https://www.linkedin.com/in/mcmullarkey/">Michael Mullarkey on LinkedIn</a></p><p><a href="https://aiberry.io">Aiberry</a></p><p><br><strong>Resources for Computer Vision Teams:</strong></p><p><a href="https://www.linkedin.com/in/hdcouture/">LinkedIn</a> – Connect with Heather.</p><p><a href="https://pixelscientia.com/newsletter/">Computer Vision Insights Newsletter</a> – A biweekly newsletter to help bring the latest machine learning and computer vision research to applications in people and planetary health.</p><p><a href="http://pixelscientia.com/services/strategy-session/">Computer Vision Strategy Session</a> – Not sure how to advance your computer vision project? Get unstuck with a clear set of next steps. Schedule a 1 hour strategy session now to advance your project.</p><p><a href="https://pixelscientia.com/services/foundation-model-assessment/">Foundation Model Assessment</a> – Foundation models are popping up everywhere – do you need one for your proprietary image dataset? Get a clear perspective on whether you can benefit from a domain-specific foundation model.</p>]]>
      </content:encoded>
      <pubDate>Mon, 04 Mar 2024 06:00:00 -0500</pubDate>
      <author>heather@pixelscientia.com (Heather D. Couture)</author>
      <enclosure url="https://2.gum.fm/op3.dev/e/pdcn.co/e/pdst.fm/e/dts.podtrac.com/redirect.mp3/media.transistor.fm/a5a80bcb/a2410b69.mp3" length="27656023" type="audio/mpeg"/>
      <itunes:author>Heather D. Couture</itunes:author>
      <itunes:image href="https://img.transistor.fm/5JNNJguQ6qUMo2-_kjbSJ5Bdeo7q8vk68BbFVk1m0-c/rs:fill:0:0:1/w:1400/h:1400/q:60/mb:500000/aHR0cHM6Ly9pbWct/dXBsb2FkLXByb2R1/Y3Rpb24udHJhbnNp/c3Rvci5mbS9lcGlz/b2RlLzE2NzYwNTAv/MTcwNDU3MzYyMy1h/cnR3b3JrLmpwZw.jpg"/>
      <itunes:duration>1151</itunes:duration>
      <itunes:summary>
        <![CDATA[<p>Joining me today are Linda Chung and Michael Mullarkey to discuss the transformative potential of AI in mental health care. Linda is the co-CEO and Co-Founder of Aiberry, a groundbreaking AI company redefining mental healthcare accessibility. With a background in speech-language pathology, Linda pioneered telehealth services and now leads Aiberry in leveraging innovative technology for objective mental health screenings. Michael, the Senior Clinical Data Scientist at Aiberry, is dedicated to translating complex data science into tangible human value. His unique background in clinical psychology merged with a passion for coding drives his mission to address pressing human concerns through data.</p><p>In our conversation, we explore the fascinating intersection of clinical expertise and artificial intelligence, unlocking personalized insights and proactive strategies for mental well-being. Hear about Aiberry’s innovative chatbot “Botberry” and how it helps provide insights into the user’s mental health. We also get into the weeds and unpack how Aiberry develops its models, data source challenges, the value of custom models, mitigating model biases, and much more! Our guests also provide invaluable advice for other startups and share their future vision for the company. 
Tune in and discover AI technology at the forefront of mental health innovation with Linda Chung and Michael Mullarkey from Aiberry!</p><p><br></p><p><strong>Key Points:</strong></p><ul><li>Linda’s healthcare background and motivation for starting Aiberry.</li><li>Michael's transition from clinical psychology to AI at Aiberry.</li><li>Aiberry's AI-powered mental health assessment platform and its unique approach.</li><li>The role of machine learning in Aiberry's technology.</li><li>Model development, data collection challenges, and custom model creation.</li><li>Addressing bias in models trained on patient interview data.</li><li>Measuring impact and success metrics at Aiberry.</li><li>Advice for leaders of AI-powered startups.</li><li>The vision for Aiberry's impact in the next three to five years.</li></ul><p><br></p><p><strong>Quotes:</strong></p><p>“We know that early detection leads to early intervention and better outcomes.” — Linda Chung </p><p><br></p><p>“Our models take the messy, natural human way that people talk about their mental health, and we turn it into systematic data that are necessary for the healthcare industry and report it back to the user.” — Linda Chung</p><p><br></p><p>“As a health tech company, we have to take the health and the tech elements of our business equally seriously. 
So, one of our guiding principles from a health perspective is we have to keep people's data secure.” — Michael Mullarkey</p><p><br></p><p>“We really value letting people talk about their mental health in their own words, and that can lead to some unexpected outcomes on the modeling side of the operation.” — Michael Mullarkey</p><p><br></p><p><strong>Links:</strong></p><p><a href="https://www.linkedin.com/in/linda-chung-905733146/">Linda Chung on LinkedIn</a></p><p><a href="https://www.linkedin.com/in/mcmullarkey/">Michael Mullarkey on LinkedIn</a></p><p><a href="https://aiberry.io">Aiberry</a></p><p><br><strong>Resources for Computer Vision Teams:</strong></p><p><a href="https://www.linkedin.com/in/hdcouture/">LinkedIn</a> – Connect with Heather.</p><p><a href="https://pixelscientia.com/newsletter/">Computer Vision Insights Newsletter</a> – A biweekly newsletter to help bring the latest machine learning and computer vision research to applications in people and planetary health.</p><p><a href="http://pixelscientia.com/services/strategy-session/">Computer Vision Strategy Session</a> – Not sure how to advance your computer vision project? Get unstuck with a clear set of next steps. Schedule a 1 hour strategy session now to advance your project.</p><p><a href="https://pixelscientia.com/services/foundation-model-assessment/">Foundation Model Assessment</a> – Foundation models are popping up everywhere – do you need one for your proprietary image dataset? Get a clear perspective on whether you can benefit from a domain-specific foundation model.</p>]]>
      </itunes:summary>
      <itunes:keywords>machine learning, mental health, healthcare</itunes:keywords>
      <itunes:explicit>No</itunes:explicit>
      <podcast:transcript url="https://share.transistor.fm/s/a5a80bcb/transcript.txt" type="text/plain"/>
    </item>
    <item>
      <title>Revitalizing Forests with Guy Bayes from Vibrant Planet</title>
      <itunes:episode>71</itunes:episode>
      <podcast:episode>71</podcast:episode>
      <itunes:title>Revitalizing Forests with Guy Bayes from Vibrant Planet</itunes:title>
      <itunes:episodeType>full</itunes:episodeType>
      <guid isPermaLink="false">88a5810b-fbfa-4717-bb95-77986b242be2</guid>
      <link>https://pixelscientia.com/podcast/revitalizing-forests-with-guy-bayes-from-vibrant-planet/</link>
      <description>
        <![CDATA[<p>Machine learning can be used as an innovative method to contribute to climate change resiliency. Today on Impact AI, I am joined by the co-founder and CTO of Vibrant Planet, Guy Bayes, to discuss how they are using AI to revitalize forests. Listening in, you’ll hear all about our guest’s background, why he started Vibrant Planet, what the company does, how they apply machine learning to their work, and a breakdown of how they collect the four sets of data they need. We delve into any problem areas they face in their individual and integrated data types before Guy tells us how they cross-validate their models. We even talk about how the teams collaborate, how machine learning and forest knowledge come together, and where he sees the company in the next three to five years. Finally, our guest shares some pearls of wisdom for any leaders of AI-powered startups.</p><p><br></p><p><strong>Key Points:</strong></p><ul><li>A warm introduction to today’s guest, Guy Bayes. </li><li>Guy tells us about his background and what led him to create Vibrant Planet. </li><li>What Vibrant Planet does and how it contributes to climate change resiliency. </li><li>How Vibrant Planet applies machine learning to the work.</li><li>A breakdown of the four sets of data they need and how they collect it. </li><li>The challenges they face when it comes to collecting and integrating all their data. </li><li>How Guy makes sure that their models work in different geographic regions. </li><li>Incorporating forest knowledge into data modeling and machine learning development. </li><li>How the Vibrant Planet teams work together and collaborate to achieve their goal. </li><li>What Vibrant Planet does to measure the impact of this technology. </li><li>New AI advancements Guy is particularly excited about for Vibrant Planet. </li><li>Guy shares some advice for leaders of AI-powered startups.</li><li>Where he sees Vibrant Planet’s impact in the next three to five years. 
</li></ul><p><br></p><p><strong>Quotes:</strong></p><p>“Getting the forest back into a state that's more able to tolerate fire and more able to produce low-intensity fire rather than high-intensity fire is [Vibrant Planet’s] goal.” — Guy Bayes</p><p><br></p><p>“We have – not only super good engineers but also very talented ecological scientists and people that have done physical hands-on forestry for their careers. – This mix of those three personas – work together pretty harmoniously actually because we all share a common goal.” — Guy Bayes</p><p><br></p><p>“I don't think you can ever find one person who has all that in their head, but you can find a team that does.” — Guy Bayes</p><p><br></p><p>“You will not have an impact without having a combined team that all respects each other and brings different things to the table.” — Guy Bayes</p><p><br></p><p><strong>Links:</strong></p><p><a href="https://www.linkedin.com/in/guy-bayes-53a72a/">Guy Bayes on LinkedIn</a></p><p><a href="https://www.vibrantplanet.net/">Vibrant Planet</a></p><p><a href="https://www.linkedin.com/company/vibrant-planet/">Vibrant Planet on LinkedIn</a></p><p><br><strong>Resources for Computer Vision Teams:</strong></p><p><a href="https://www.linkedin.com/in/hdcouture/">LinkedIn</a> – Connect with Heather.</p><p><a href="https://pixelscientia.com/newsletter/">Computer Vision Insights Newsletter</a> – A biweekly newsletter to help bring the latest machine learning and computer vision research to applications in people and planetary health.</p><p><a href="http://pixelscientia.com/services/strategy-session/">Computer Vision Strategy Session</a> – Not sure how to advance your computer vision project? Get unstuck with a clear set of next steps. 
Schedule a 1 hour strategy session now to advance your project.</p><p><a href="https://pixelscientia.com/services/foundation-model-assessment/">Foundation Model Assessment</a> – Foundation models are popping up everywhere – do you need one for your proprietary image dataset? Get a clear perspective on whether you can benefit from a domain-specific foundation model.</p>]]>
      </description>
      <content:encoded>
        <![CDATA[<p>Machine learning can be used as an innovative method to contribute to climate change resiliency. Today on Impact AI, I am joined by the co-founder and CTO of Vibrant Planet, Guy Bayes, to discuss how they are using AI to revitalize forests. Listening in, you’ll hear all about our guest’s background, why he started Vibrant Planet, what the company does, how they apply machine learning to their work, and a breakdown of how they collect the four sets of data they need. We delve into any problem areas they face in their individual and integrated data types before Guy tells us how they cross-validate their models. We even talk about how the teams collaborate, how machine learning and forest knowledge come together, and where he sees the company in the next three to five years. Finally, our guest shares some pearls of wisdom for any leaders of AI-powered startups.</p><p><br></p><p><strong>Key Points:</strong></p><ul><li>A warm introduction to today’s guest, Guy Bayes. </li><li>Guy tells us about his background and what led him to create Vibrant Planet. </li><li>What Vibrant Planet does and how it contributes to climate change resiliency. </li><li>How Vibrant Planet applies machine learning to the work.</li><li>A breakdown of the four sets of data they need and how they collect it. </li><li>The challenges they face when it comes to collecting and integrating all their data. </li><li>How Guy makes sure that their models work in different geographic regions. </li><li>Incorporating forest knowledge into data modeling and machine learning development. </li><li>How the Vibrant Planet teams work together and collaborate to achieve their goal. </li><li>What Vibrant Planet does to measure the impact of this technology. </li><li>New AI advancements Guy is particularly excited about for Vibrant Planet. </li><li>Guy shares some advice for leaders of AI-powered startups.</li><li>Where he sees Vibrant Planet’s impact in the next three to five years. 
</li></ul><p><br></p><p><strong>Quotes:</strong></p><p>“Getting the forest back into a state that's more able to tolerate fire and more able to produce low-intensity fire rather than high-intensity fire is [Vibrant Planet’s] goal.” — Guy Bayes</p><p><br></p><p>“We have – not only super good engineers but also very talented ecological scientists and people that have done physical hands-on forestry for their careers. – This mix of those three personas – work together pretty harmoniously actually because we all share a common goal.” — Guy Bayes</p><p><br></p><p>“I don't think you can ever find one person who has all that in their head, but you can find a team that does.” — Guy Bayes</p><p><br></p><p>“You will not have an impact without having a combined team that all respects each other and brings different things to the table.” — Guy Bayes</p><p><br></p><p><strong>Links:</strong></p><p><a href="https://www.linkedin.com/in/guy-bayes-53a72a/">Guy Bayes on LinkedIn</a></p><p><a href="https://www.vibrantplanet.net/">Vibrant Planet</a></p><p><a href="https://www.linkedin.com/company/vibrant-planet/">Vibrant Planet on LinkedIn</a></p><p><br><strong>Resources for Computer Vision Teams:</strong></p><p><a href="https://www.linkedin.com/in/hdcouture/">LinkedIn</a> – Connect with Heather.</p><p><a href="https://pixelscientia.com/newsletter/">Computer Vision Insights Newsletter</a> – A biweekly newsletter to help bring the latest machine learning and computer vision research to applications in people and planetary health.</p><p><a href="http://pixelscientia.com/services/strategy-session/">Computer Vision Strategy Session</a> – Not sure how to advance your computer vision project? Get unstuck with a clear set of next steps. 
Schedule a 1 hour strategy session now to advance your project.</p><p><a href="https://pixelscientia.com/services/foundation-model-assessment/">Foundation Model Assessment</a> – Foundation models are popping up everywhere – do you need one for your proprietary image dataset? Get a clear perspective on whether you can benefit from a domain-specific foundation model.</p>]]>
      </content:encoded>
      <pubDate>Mon, 26 Feb 2024 06:00:00 -0500</pubDate>
      <author>Heather D. Couture</author>
      <enclosure url="https://2.gum.fm/op3.dev/e/pdcn.co/e/pdst.fm/e/dts.podtrac.com/redirect.mp3/media.transistor.fm/22d69cf1/4bd08ca3.mp3" length="25655802" type="audio/mpeg"/>
      <itunes:author>Heather D. Couture</itunes:author>
      <itunes:image href="https://img.transistor.fm/ROv3G9YteE-AIMh9mGLB0QC5HS4oNm3ptBhhTq4zxRw/rs:fill:0:0:1/w:1400/h:1400/q:60/mb:500000/aHR0cHM6Ly9pbWct/dXBsb2FkLXByb2R1/Y3Rpb24udHJhbnNp/c3Rvci5mbS9lcGlz/b2RlLzE3MjY4NzEv/MTcwNzU5Mjg3NS1h/cnR3b3JrLmpwZw.jpg"/>
      <itunes:duration>1594</itunes:duration>
      <itunes:summary>
        <![CDATA[<p>Machine learning can be used as an innovative method to contribute to climate change resiliency. Today on Impact AI, I am joined by the co-founder and CTO of Vibrant Planet, Guy Bayes, to discuss how they are using AI to revitalize forests. Listening in, you’ll hear all about our guest’s background, why he started Vibrant Planet, what the company does, how they apply machine learning to their work, and a breakdown of how they collect the four sets of data they need. We delve into any problem areas they face in their individual and integrated data types before Guy tells us how they cross-validate their models. We even talk about how the teams collaborate, how machine learning and forest knowledge come together, and where he sees the company in the next three to five years. Finally, our guest shares some pearls of wisdom for any leaders of AI-powered startups.</p><p><br></p><p><strong>Key Points:</strong></p><ul><li>A warm introduction to today’s guest, Guy Bayes. </li><li>Guy tells us about his background and what led him to create Vibrant Planet. </li><li>What Vibrant Planet does and how it contributes to climate change resiliency. </li><li>How Vibrant Planet applies machine learning to the work.</li><li>A breakdown of the four sets of data they need and how they collect it. </li><li>The challenges they face when it comes to collecting and integrating all their data. </li><li>How Guy makes sure that their models work in different geographic regions. </li><li>Incorporating forest knowledge into data modeling and machine learning development. </li><li>How the Vibrant Planet teams work together and collaborate to achieve their goal. </li><li>What Vibrant Planet does to measure the impact of this technology. </li><li>New AI advancements Guy is particularly excited about for Vibrant Planet. </li><li>Guy shares some advice for leaders of AI-powered startups.</li><li>Where he sees Vibrant Planet’s impact in the next three to five years. 
</li></ul><p><br></p><p><strong>Quotes:</strong></p><p>“Getting the forest back into a state that's more able to tolerate fire and more able to produce low-intensity fire rather than high-intensity fire is [Vibrant Planet’s] goal.” — Guy Bayes</p><p><br></p><p>“We have – not only super good engineers but also very talented ecological scientists and people that have done physical hands-on forestry for their careers. – This mix of those three personas – work together pretty harmoniously actually because we all share a common goal.” — Guy Bayes</p><p><br></p><p>“I don't think you can ever find one person who has all that in their head, but you can find a team that does.” — Guy Bayes</p><p><br></p><p>“You will not have an impact without having a combined team that all respects each other and brings different things to the table.” — Guy Bayes</p><p><br></p><p><strong>Links:</strong></p><p><a href="https://www.linkedin.com/in/guy-bayes-53a72a/">Guy Bayes on LinkedIn</a></p><p><a href="https://www.vibrantplanet.net/">Vibrant Planet</a></p><p><a href="https://www.linkedin.com/company/vibrant-planet/">Vibrant Planet on LinkedIn</a></p><p><br><strong>Resources for Computer Vision Teams:</strong></p><p><a href="https://www.linkedin.com/in/hdcouture/">LinkedIn</a> – Connect with Heather.</p><p><a href="https://pixelscientia.com/newsletter/">Computer Vision Insights Newsletter</a> – A biweekly newsletter to help bring the latest machine learning and computer vision research to applications in people and planetary health.</p><p><a href="http://pixelscientia.com/services/strategy-session/">Computer Vision Strategy Session</a> – Not sure how to advance your computer vision project? Get unstuck with a clear set of next steps. 
Schedule a 1 hour strategy session now to advance your project.</p><p><a href="https://pixelscientia.com/services/foundation-model-assessment/">Foundation Model Assessment</a> – Foundation models are popping up everywhere – do you need one for your proprietary image dataset? Get a clear perspective on whether you can benefit from a domain-specific foundation model.</p>]]>
      </itunes:summary>
      <itunes:keywords>machine learning, deep learning, remote sensing, satellite, forestry</itunes:keywords>
      <itunes:explicit>No</itunes:explicit>
      <podcast:transcript url="https://share.transistor.fm/s/22d69cf1/transcript.txt" type="text/plain"/>
    </item>
    <item>
      <title>Unlocking Blood Cell Morphology with Erez Naaman from Scopio Labs</title>
      <itunes:episode>70</itunes:episode>
      <podcast:episode>70</podcast:episode>
      <itunes:title>Unlocking Blood Cell Morphology with Erez Naaman from Scopio Labs</itunes:title>
      <itunes:episodeType>full</itunes:episodeType>
      <guid isPermaLink="false">510445e9-240c-422e-89c9-8948dc35ea36</guid>
      <link>https://pixelscientia.com/podcast/unlocking-blood-cell-morphology-with-erez-naaman-from-scopio-labs/</link>
      <description>
        <![CDATA[<p>In this episode, I sit down with Erez Naaman, co-founder and CTO of Scopio Labs, to delve into the transformative potential of AI in healthcare, particularly in blood cell morphology analysis. Erez shares the intriguing journey behind the inception of Scopio Labs which was driven by a desire to revolutionize healthcare practices. Discover how Scopio Labs' platforms digitize and streamline the process of blood cell analysis and the pivotal role of machine learning in distinguishing and classifying various cell types. Gain insights into the significance of data collection and algorithm development, the evolution of AI infrastructure over the past decade, regulatory considerations on product development, and more. He also shares invaluable insights for AI startup leaders, the future trajectory of Scopio Labs, and the profound impact envisioned for the healthcare landscape. Join me as we explore the intersection of AI and healthcare innovation with Erez Naaman.</p><p><br></p><p><strong>Key Points:</strong></p><ul><li>Erez shares his professional background and his path to founding Scopio Labs.</li><li>Revolutionizing healthcare through AI-driven blood cell morphology analysis.</li><li>The pivotal role machine learning plays in distinguishing and classifying various cell types.</li><li>Discover the challenges of working with blood smear images; particularly for training models.</li><li>Learn about the differences between regulated and nonregulated machine learning.</li><li>AI infrastructure development and the associated regulatory considerations.</li><li>Explore his approach to developing new machine learning products or features. 
</li><li>Hear why he chooses to prioritize the end-user experience during development.</li><li>Advice for budding entrepreneurs and the future trajectory of Scopio Labs.</li></ul><p><br></p><p><strong>Quotes:</strong></p><p>“In terms of the approach [to AI], I think we saw it the same way that we do today in terms of its importance but I think that the infrastructure for using ML has greatly evolved.” — Erez Naaman</p><p><br></p><p>“Getting a large enough data set to get a reliable classification on specific more rare cell types is the most difficult problem in my opinion.” — Erez Naaman</p><p><br></p><p>“In a way, we look at it backward. Machine learning is a tool and not a goal. So, we always start with the patient in mind or the user.” — Erez Naaman</p><p><br></p><p>“Everyone is dealing with AI and so the front runners are clearly becoming the leaders with time. So, it is much easier to choose the right tools for every task as time progresses.” — Erez Naaman</p><p><br></p><p><strong>Links:</strong></p><p><a href="https://www.linkedin.com/in/erez-naaman-8654972/">Erez Naaman on LinkedIn</a></p><p><a href="https://scopiolabs.com">Scopio Labs</a></p><p><br><strong>Resources for Computer Vision Teams:</strong></p><p><a href="https://www.linkedin.com/in/hdcouture/">LinkedIn</a> – Connect with Heather.</p><p><a href="https://pixelscientia.com/newsletter/">Computer Vision Insights Newsletter</a> – A biweekly newsletter to help bring the latest machine learning and computer vision research to applications in people and planetary health.</p><p><a href="http://pixelscientia.com/services/strategy-session/">Computer Vision Strategy Session</a> – Not sure how to advance your computer vision project? Get unstuck with a clear set of next steps. 
Schedule a 1 hour strategy session now to advance your project.</p><p><a href="https://pixelscientia.com/services/foundation-model-assessment/">Foundation Model Assessment</a> – Foundation models are popping up everywhere – do you need one for your proprietary image dataset? Get a clear perspective on whether you can benefit from a domain-specific foundation model.</p>]]>
      </description>
      <content:encoded>
        <![CDATA[<p>In this episode, I sit down with Erez Naaman, co-founder and CTO of Scopio Labs, to delve into the transformative potential of AI in healthcare, particularly in blood cell morphology analysis. Erez shares the intriguing journey behind the inception of Scopio Labs which was driven by a desire to revolutionize healthcare practices. Discover how Scopio Labs' platforms digitize and streamline the process of blood cell analysis and the pivotal role of machine learning in distinguishing and classifying various cell types. Gain insights into the significance of data collection and algorithm development, the evolution of AI infrastructure over the past decade, regulatory considerations on product development, and more. He also shares invaluable insights for AI startup leaders, the future trajectory of Scopio Labs, and the profound impact envisioned for the healthcare landscape. Join me as we explore the intersection of AI and healthcare innovation with Erez Naaman.</p><p><br></p><p><strong>Key Points:</strong></p><ul><li>Erez shares his professional background and his path to founding Scopio Labs.</li><li>Revolutionizing healthcare through AI-driven blood cell morphology analysis.</li><li>The pivotal role machine learning plays in distinguishing and classifying various cell types.</li><li>Discover the challenges of working with blood smear images; particularly for training models.</li><li>Learn about the differences between regulated and nonregulated machine learning.</li><li>AI infrastructure development and the associated regulatory considerations.</li><li>Explore his approach to developing new machine learning products or features. 
</li><li>Hear why he chooses to prioritize the end-user experience during development.</li><li>Advice for budding entrepreneurs and the future trajectory of Scopio Labs.</li></ul><p><br></p><p><strong>Quotes:</strong></p><p>“In terms of the approach [to AI], I think we saw it the same way that we do today in terms of its importance but I think that the infrastructure for using ML has greatly evolved.” — Erez Naaman</p><p><br></p><p>“Getting a large enough data set to get a reliable classification on specific more rare cell types is the most difficult problem in my opinion.” — Erez Naaman</p><p><br></p><p>“In a way, we look at it backward. Machine learning is a tool and not a goal. So, we always start with the patient in mind or the user.” — Erez Naaman</p><p><br></p><p>“Everyone is dealing with AI and so the front runners are clearly becoming the leaders with time. So, it is much easier to choose the right tools for every task as time progresses.” — Erez Naaman</p><p><br></p><p><strong>Links:</strong></p><p><a href="https://www.linkedin.com/in/erez-naaman-8654972/">Erez Naaman on LinkedIn</a></p><p><a href="https://scopiolabs.com">Scopio Labs</a></p><p><br><strong>Resources for Computer Vision Teams:</strong></p><p><a href="https://www.linkedin.com/in/hdcouture/">LinkedIn</a> – Connect with Heather.</p><p><a href="https://pixelscientia.com/newsletter/">Computer Vision Insights Newsletter</a> – A biweekly newsletter to help bring the latest machine learning and computer vision research to applications in people and planetary health.</p><p><a href="http://pixelscientia.com/services/strategy-session/">Computer Vision Strategy Session</a> – Not sure how to advance your computer vision project? Get unstuck with a clear set of next steps. 
Schedule a 1 hour strategy session now to advance your project.</p><p><a href="https://pixelscientia.com/services/foundation-model-assessment/">Foundation Model Assessment</a> – Foundation models are popping up everywhere – do you need one for your proprietary image dataset? Get a clear perspective on whether you can benefit from a domain-specific foundation model.</p>]]>
      </content:encoded>
      <pubDate>Mon, 19 Feb 2024 06:00:00 -0500</pubDate>
      <author>Heather D. Couture</author>
      <enclosure url="https://2.gum.fm/op3.dev/e/pdcn.co/e/pdst.fm/e/dts.podtrac.com/redirect.mp3/media.transistor.fm/10d81b6c/54740d28.mp3" length="15946980" type="audio/mpeg"/>
      <itunes:author>Heather D. Couture</itunes:author>
      <itunes:image href="https://img.transistor.fm/XgKOdXHXNALOrHvEAysN2s-4VKCtqze6Q8Repep5_pE/rs:fill:0:0:1/w:1400/h:1400/q:60/mb:500000/aHR0cHM6Ly9pbWct/dXBsb2FkLXByb2R1/Y3Rpb24udHJhbnNp/c3Rvci5mbS9lcGlz/b2RlLzE3MjY4Njkv/MTcwNzU5Mjc1NC1h/cnR3b3JrLmpwZw.jpg"/>
      <itunes:duration>988</itunes:duration>
      <itunes:summary>
        <![CDATA[<p>In this episode, I sit down with Erez Naaman, co-founder and CTO of Scopio Labs, to delve into the transformative potential of AI in healthcare, particularly in blood cell morphology analysis. Erez shares the intriguing journey behind the inception of Scopio Labs which was driven by a desire to revolutionize healthcare practices. Discover how Scopio Labs' platforms digitize and streamline the process of blood cell analysis and the pivotal role of machine learning in distinguishing and classifying various cell types. Gain insights into the significance of data collection and algorithm development, the evolution of AI infrastructure over the past decade, regulatory considerations on product development, and more. He also shares invaluable insights for AI startup leaders, the future trajectory of Scopio Labs, and the profound impact envisioned for the healthcare landscape. Join me as we explore the intersection of AI and healthcare innovation with Erez Naaman.</p><p><br></p><p><strong>Key Points:</strong></p><ul><li>Erez shares his professional background and his path to founding Scopio Labs.</li><li>Revolutionizing healthcare through AI-driven blood cell morphology analysis.</li><li>The pivotal role machine learning plays in distinguishing and classifying various cell types.</li><li>Discover the challenges of working with blood smear images; particularly for training models.</li><li>Learn about the differences between regulated and nonregulated machine learning.</li><li>AI infrastructure development and the associated regulatory considerations.</li><li>Explore his approach to developing new machine learning products or features. 
</li><li>Hear why he chooses to prioritize the end-user experience during development.</li><li>Advice for budding entrepreneurs and the future trajectory of Scopio Labs.</li></ul><p><br></p><p><strong>Quotes:</strong></p><p>“In terms of the approach [to AI], I think we saw it the same way that we do today in terms of its importance but I think that the infrastructure for using ML has greatly evolved.” — Erez Naaman</p><p><br></p><p>“Getting a large enough data set to get a reliable classification on specific more rare cell types is the most difficult problem in my opinion.” — Erez Naaman</p><p><br></p><p>“In a way, we look at it backward. Machine learning is a tool and not a goal. So, we always start with the patient in mind or the user.” — Erez Naaman</p><p><br></p><p>“Everyone is dealing with AI and so the front runners are clearly becoming the leaders with time. So, it is much easier to choose the right tools for every task as time progresses.” — Erez Naaman</p><p><br></p><p><strong>Links:</strong></p><p><a href="https://www.linkedin.com/in/erez-naaman-8654972/">Erez Naaman on LinkedIn</a></p><p><a href="https://scopiolabs.com">Scopio Labs</a></p><p><br><strong>Resources for Computer Vision Teams:</strong></p><p><a href="https://www.linkedin.com/in/hdcouture/">LinkedIn</a> – Connect with Heather.</p><p><a href="https://pixelscientia.com/newsletter/">Computer Vision Insights Newsletter</a> – A biweekly newsletter to help bring the latest machine learning and computer vision research to applications in people and planetary health.</p><p><a href="http://pixelscientia.com/services/strategy-session/">Computer Vision Strategy Session</a> – Not sure how to advance your computer vision project? Get unstuck with a clear set of next steps. 
Schedule a 1 hour strategy session now to advance your project.</p><p><a href="https://pixelscientia.com/services/foundation-model-assessment/">Foundation Model Assessment</a> – Foundation models are popping up everywhere – do you need one for your proprietary image dataset? Get a clear perspective on whether you can benefit from a domain-specific foundation model.</p>]]>
      </itunes:summary>
      <itunes:keywords>machine learning, computer vision, medical imaging, cell morphology</itunes:keywords>
      <itunes:explicit>No</itunes:explicit>
      <podcast:transcript url="https://share.transistor.fm/s/10d81b6c/transcript.txt" type="text/plain"/>
    </item>
    <item>
      <title>Unlocking Conversational Healthcare Data with Amy Brown from Authenticx</title>
      <itunes:episode>69</itunes:episode>
      <podcast:episode>69</podcast:episode>
      <itunes:title>Unlocking Conversational Healthcare Data with Amy Brown from Authenticx</itunes:title>
      <itunes:episodeType>full</itunes:episodeType>
      <guid isPermaLink="false">e44c3689-0605-46b5-9c82-d3ad86d605f9</guid>
      <link>https://pixelscientia.com/podcast/unlocking-conversational-healthcare-data-with-amy-brown-from-authenticx/</link>
      <description>
        <![CDATA[<p>Customer service calls often start and end at the operator’s headset, but there is so much untapped data from these conversations that could be used to improve business systems on a holistic level. Today’s guest, Amy Brown has seen the value of unlocking conversational data to improve healthcare systems across the country, and as the Founder and CEO of Authenticx, she has taken giant strides towards accomplishing this goal.</p><p>Authenticx is an AI-powered platform that makes it possible for healthcare organizations to have a single source of conversational data, creating powerful and immersive customer insight analysis that informs business decisions. In today’s conversation, Amy explains why she founded Authenticx, what the company does, and why her business is important for healthcare.  We also learn about how the company uses machine learning in its processes, the challenges of working with conversational data, how Authenticx upholds a high ethical standard, and how the impact of its technology can be measured across healthcare systems nationwide. After sharing some important advice for other leaders of AI-powered startups, Amy explains why Authenticx will be a key player in healthcare for the foreseeable future. </p><p><br></p><p><strong>Key Points:</strong></p><ul><li>A warm welcome to the Founder and CEO of Authenticx, Amy Brown. </li><li>Amy’s professional background, and how she ended up founding Authenticx. </li><li>What Authenticx does and why the company is important for healthcare. </li><li>How the company uses machine learning to get better insights from conversational data.  </li><li>A closer look at the conversational data that Authenticx works with. </li><li>The challenges of working with and training models on conversational data.  </li><li>Other ways that they validate their models. </li><li>Mitigating biases and upholding ethics. 
</li><li>How Amy measures the impact of Authenticx’s technology.</li><li>Her advice to other leaders of AI-powered startups. </li><li>Where Authenticx will be in the next three to five years, according to Amy.</li></ul><p><br></p><p><strong>Quotes:</strong></p><p>“That’s really what I’m trying to get at; using technology to help explain customer and consumer perception of their care, and using that; putting that to work for the healthcare industry so it can start to improve its systems in a way that allows patients and consumers to actually get a better outcome.” — Amy Brown</p><p><br></p><p>“Our data team has had to become extremely proficient at dealing with all kinds of messy data.” — Amy Brown</p><p><br></p><p>“We’ve hired a diverse group of human beings because we want to make sure that we’re inclusive in our interpretations of what’s happening in these conversations.” — Amy Brown</p><p><br></p><p>“You can never eliminate all bias – we would never purport of doing that – but we can be very intentional about how we train the data.” — Amy Brown</p><p><br></p><p>“[The] dream scenario is that the healthcare system in this country starts to make room for and evolve in how it makes its business decisions to include the voices of their customers as a key source of insight, intel, and data.” — Amy Brown</p><p><br></p><p><strong>Links:</strong></p><p><a href="https://www.linkedin.com/in/amy-brown-84821210/">Amy Brown on LinkedIn</a></p><p><a href="https://twitter.com/AmyAuthenticx">Amy Brown on X</a></p><p><a href="https://authenticx.com/">Authenticx</a></p><p><a href="https://www.instagram.com/be_authenticx/">Authenticx on Instagram</a></p><p><br><strong>Resources for Computer Vision Teams:</strong></p><p><a href="https://www.linkedin.com/in/hdcouture/">LinkedIn</a> – Connect with Heather.</p><p><a href="https://pixelscientia.com/newsletter/">Computer Vision Insights Newsletter</a> – A biweekly newsletter to help bring the latest machine learning and computer vision 
research to applications in people and planetary health.</p><p><a href="http://pixelscientia.com/services/strategy-session/">Computer Vision Strategy Session</a> – Not sure how to advance your computer vision project? Get unstuck with a clear set of next steps. Schedule a 1 hour strategy session now to advance your project.</p><p><a href="https://pixelscientia.com/services/foundation-model-assessment/">Foundation Model Assessment</a> – Foundation models are popping up everywhere – do you need one for your proprietary image dataset? Get a clear perspective on whether you can benefit from a domain-specific foundation model.</p>]]>
      </description>
      <content:encoded>
        <![CDATA[<p>Customer service calls often start and end at the operator’s headset, but there is so much untapped data from these conversations that could be used to improve business systems on a holistic level. Today’s guest, Amy Brown has seen the value of unlocking conversational data to improve healthcare systems across the country, and as the Founder and CEO of Authenticx, she has taken giant strides towards accomplishing this goal.</p><p>Authenticx is an AI-powered platform that makes it possible for healthcare organizations to have a single source of conversational data, creating powerful and immersive customer insight analysis that informs business decisions. In today’s conversation, Amy explains why she founded Authenticx, what the company does, and why her business is important for healthcare.  We also learn about how the company uses machine learning in its processes, the challenges of working with conversational data, how Authenticx upholds a high ethical standard, and how the impact of its technology can be measured across healthcare systems nationwide. After sharing some important advice for other leaders of AI-powered startups, Amy explains why Authenticx will be a key player in healthcare for the foreseeable future. </p><p><br></p><p><strong>Key Points:</strong></p><ul><li>A warm welcome to the Founder and CEO of Authenticx, Amy Brown. </li><li>Amy’s professional background, and how she ended up founding Authenticx. </li><li>What Authenticx does and why the company is important for healthcare. </li><li>How the company uses machine learning to get better insights from conversational data.  </li><li>A closer look at the conversational data that Authenticx works with. </li><li>The challenges of working with and training models on conversational data.  </li><li>Other ways that they validate their models. </li><li>Mitigating biases and upholding ethics. 
</li><li>How Amy measures the impact of Authenticx’s technology.</li><li>Her advice to other leaders of AI-powered startups. </li><li>Where Authenticx will be in the next three to five years, according to Amy.</li></ul><p><br></p><p><strong>Quotes:</strong></p><p>“That’s really what I’m trying to get at; using technology to help explain customer and consumer perception of their care, and using that; putting that to work for the healthcare industry so it can start to improve its systems in a way that allows patients and consumers to actually get a better outcome.” — Amy Brown</p><p><br></p><p>“Our data team has had to become extremely proficient at dealing with all kinds of messy data.” — Amy Brown</p><p><br></p><p>“We’ve hired a diverse group of human beings because we want to make sure that we’re inclusive in our interpretations of what’s happening in these conversations.” — Amy Brown</p><p><br></p><p>“You can never eliminate all bias – we would never purport of doing that – but we can be very intentional about how we train the data.” — Amy Brown</p><p><br></p><p>“[The] dream scenario is that the healthcare system in this country starts to make room for and evolve in how it makes its business decisions to include the voices of their customers as a key source of insight, intel, and data.” — Amy Brown</p><p><br></p><p><strong>Links:</strong></p><p><a href="https://www.linkedin.com/in/amy-brown-84821210/">Amy Brown on LinkedIn</a></p><p><a href="https://twitter.com/AmyAuthenticx">Amy Brown on X</a></p><p><a href="https://authenticx.com/">Authenticx</a></p><p><a href="https://www.instagram.com/be_authenticx/">Authenticx on Instagram</a></p><p><br><strong>Resources for Computer Vision Teams:</strong></p><p><a href="https://www.linkedin.com/in/hdcouture/">LinkedIn</a> – Connect with Heather.</p><p><a href="https://pixelscientia.com/newsletter/">Computer Vision Insights Newsletter</a> – A biweekly newsletter to help bring the latest machine learning and computer vision 
research to applications in people and planetary health.</p><p><a href="http://pixelscientia.com/services/strategy-session/">Computer Vision Strategy Session</a> – Not sure how to advance your computer vision project? Get unstuck with a clear set of next steps. Schedule a 1 hour strategy session now to advance your project.</p><p><a href="https://pixelscientia.com/services/foundation-model-assessment/">Foundation Model Assessment</a> – Foundation models are popping up everywhere – do you need one for your proprietary image dataset? Get a clear perspective on whether you can benefit from a domain-specific foundation model.</p>]]>
      </content:encoded>
      <pubDate>Mon, 12 Feb 2024 06:00:00 -0500</pubDate>
      <author>Heather D. Couture</author>
      <enclosure url="https://2.gum.fm/op3.dev/e/pdcn.co/e/pdst.fm/e/dts.podtrac.com/redirect.mp3/media.transistor.fm/94b9c439/9fcc3a3a.mp3" length="19160018" type="audio/mpeg"/>
      <itunes:author>Heather D. Couture</itunes:author>
      <itunes:image href="https://img.transistor.fm/RDaHO3zh7q1_YWcm28CBOdKG0LTVAgxnvIuEXg_5pFc/rs:fill:0:0:1/w:1400/h:1400/q:60/mb:500000/aHR0cHM6Ly9pbWct/dXBsb2FkLXByb2R1/Y3Rpb24udHJhbnNp/c3Rvci5mbS9lcGlz/b2RlLzE2NzYwNDgv/MTcwNDU3MzYzNy1h/cnR3b3JrLmpwZw.jpg"/>
      <itunes:duration>1193</itunes:duration>
      <itunes:summary>
        <![CDATA[<p>Customer service calls often start and end at the operator’s headset, but there is so much untapped data from these conversations that could be used to improve business systems on a holistic level. Today’s guest, Amy Brown has seen the value of unlocking conversational data to improve healthcare systems across the country, and as the Founder and CEO of Authenticx, she has taken giant strides towards accomplishing this goal.</p><p>Authenticx is an AI-powered platform that makes it possible for healthcare organizations to have a single source of conversational data, creating powerful and immersive customer insight analysis that informs business decisions. In today’s conversation, Amy explains why she founded Authenticx, what the company does, and why her business is important for healthcare.  We also learn about how the company uses machine learning in its processes, the challenges of working with conversational data, how Authenticx upholds a high ethical standard, and how the impact of its technology can be measured across healthcare systems nationwide. After sharing some important advice for other leaders of AI-powered startups, Amy explains why Authenticx will be a key player in healthcare for the foreseeable future. </p><p><br></p><p><strong>Key Points:</strong></p><ul><li>A warm welcome to the Founder and CEO of Authenticx, Amy Brown. </li><li>Amy’s professional background, and how she ended up founding Authenticx. </li><li>What Authenticx does and why the company is important for healthcare. </li><li>How the company uses machine learning to get better insights from conversational data.  </li><li>A closer look at the conversational data that Authenticx works with. </li><li>The challenges of working with and training models on conversational data.  </li><li>Other ways that they validate their models. </li><li>Mitigating biases and upholding ethics. 
</li><li>How Amy measures the impact of Authenticx’s technology.</li><li>Her advice to other leaders of AI-powered startups. </li><li>Where Authenticx will be in the next three to five years, according to Amy.</li></ul><p><br></p><p><strong>Quotes:</strong></p><p>“That’s really what I’m trying to get at; using technology to help explain customer and consumer perception of their care, and using that; putting that to work for the healthcare industry so it can start to improve its systems in a way that allows patients and consumers to actually get a better outcome.” — Amy Brown</p><p><br></p><p>“Our data team has had to become extremely proficient at dealing with all kinds of messy data.” — Amy Brown</p><p><br></p><p>“We’ve hired a diverse group of human beings because we want to make sure that we’re inclusive in our interpretations of what’s happening in these conversations.” — Amy Brown</p><p><br></p><p>“You can never eliminate all bias – we would never purport of doing that – but we can be very intentional about how we train the data.” — Amy Brown</p><p><br></p><p>“[The] dream scenario is that the healthcare system in this country starts to make room for and evolve in how it makes its business decisions to include the voices of their customers as a key source of insight, intel, and data.” — Amy Brown</p><p><br></p><p><strong>Links:</strong></p><p><a href="https://www.linkedin.com/in/amy-brown-84821210/">Amy Brown on LinkedIn</a></p><p><a href="https://twitter.com/AmyAuthenticx">Amy Brown on X</a></p><p><a href="https://authenticx.com/">Authenticx</a></p><p><a href="https://www.instagram.com/be_authenticx/">Authenticx on Instagram</a></p><p><br><strong>Resources for Computer Vision Teams:</strong></p><p><a href="https://www.linkedin.com/in/hdcouture/">LinkedIn</a> – Connect with Heather.</p><p><a href="https://pixelscientia.com/newsletter/">Computer Vision Insights Newsletter</a> – A biweekly newsletter to help bring the latest machine learning and computer vision 
research to applications in people and planetary health.</p><p><a href="http://pixelscientia.com/services/strategy-session/">Computer Vision Strategy Session</a> – Not sure how to advance your computer vision project? Get unstuck with a clear set of next steps. Schedule a 1 hour strategy session now to advance your project.</p><p><a href="https://pixelscientia.com/services/foundation-model-assessment/">Foundation Model Assessment</a> – Foundation models are popping up everywhere – do you need one for your proprietary image dataset? Get a clear perspective on whether you can benefit from a domain-specific foundation model.</p>]]>
      </itunes:summary>
      <itunes:keywords>machine learning, healthcare, customer service</itunes:keywords>
      <itunes:explicit>No</itunes:explicit>
      <podcast:transcript url="https://share.transistor.fm/s/94b9c439/transcript.txt" type="text/plain"/>
    </item>
    <item>
      <title>Decoding the Human Microbiome with Guru Banavar from Viome</title>
      <itunes:episode>68</itunes:episode>
      <podcast:episode>68</podcast:episode>
      <itunes:title>Decoding the Human Microbiome with Guru Banavar from Viome</itunes:title>
      <itunes:episodeType>full</itunes:episodeType>
      <guid isPermaLink="false">e2472d93-5092-4739-bfc8-1a8fa3b6c79e</guid>
      <link>https://pixelscientia.com/podcast/decoding-the-human-micobiome-with-guru-banavar-from-viome/</link>
      <description>
        <![CDATA[<p>Using biological intelligence, human intelligence, and artificial intelligence, the company in the spotlight today aims to demystify health, make science accessible, and honor the biochemical individuality of every human.</p><p>Today on Impact AI, I am joined by the founding CTO and Head of Discovery AI at Viome, Guru Banavar! He is here to talk all about AI and the human microbiome. As you tune in, you’ll hear about Guru’s background and what led to the creation of Viome, including what they do and why their work is crucial to chronic disease. He unpacks their use of machine learning to turn RNA data into insights for their customers, the challenges they face in training models for the work they do, and Guru sheds light on the early steps of their process for planning and developing new machine-learning products or features.  Be sure not to miss out on this insightful conversation about how Guru and the team at Viome are working to decode the human microbiome.</p><p><br></p><p><strong>Key Points:</strong></p><ul><li>Learn about Guru’s background and what led to the creation of Viome.</li><li>What Viome does and why it’s important for chronic disease.</li><li>Using machine learning to turn RNA data into insights for customers at Viome.</li><li>Guru highlights the challenges they face in training models based off of the work with RNA data and the large data set they’ve collected from customers. </li><li>He unpacks the early steps in the process of planning and developing a new machine-learning product or feature.</li><li>We talk about technological advancements that made it possible to build their technology. 
</li><li>Guru’s advice to other leaders of AI-powered startups.</li><li>His thoughts on the impact of Viome in the next 3-5 years.</li></ul><p><br></p><p><strong>Quotes:</strong></p><p>“At some point in time, I decided that the impact that I wanted to make in the field of computational biology, life sciences, and healthcare could be done only if I joined a few of my friends from the broader community, and started a new company — [Viome].” — Guru Banavar</p><p><br></p><p>“I am one of those AI people who believes that you first focus on the problem, and you bring all of the tools you need to solve the problem. AI, to me, is not just one thing, like the latest buzzword. For me, AI is an ML, a set of tools, and you take the right tool for the right problem.” — Guru Banavar</p><p><br></p><p>“One of our core intellectual property elements is the meta-transcriptomic laboratory technology, which essentially, isolates, detects, and processes what we call the informative RNA molecules in any given sample. 
That required a number of sort of biochemistry-level technology breakthroughs.” — Guru Banavar</p><p><br></p><p>“I would advise other leaders of AI-powered startups to be very careful about how you pick your solution toolset, based upon the problem that you want to solve.” — Guru Banavar</p><p><br></p><p><strong>Links:</strong></p><p><a href="https://www.linkedin.com/in/banavar/">Guruduth Banavar on LinkedIn</a></p><p><a href="https://twitter.com/banavar">Guruduth Banavar on X</a></p><p><a href="https://www.viome.com/">Viome</a></p><p><a href="https://www.viome.com/blog">Viome Blog</a></p><p><br><strong>Resources for Computer Vision Teams:</strong></p><p><a href="https://www.linkedin.com/in/hdcouture/">LinkedIn</a> – Connect with Heather.</p><p><a href="https://pixelscientia.com/newsletter/">Computer Vision Insights Newsletter</a> – A biweekly newsletter to help bring the latest machine learning and computer vision research to applications in people and planetary health.</p><p><a href="http://pixelscientia.com/services/strategy-session/">Computer Vision Strategy Session</a> – Not sure how to advance your computer vision project? Get unstuck with a clear set of next steps. Schedule a 1 hour strategy session now to advance your project.</p><p><a href="https://pixelscientia.com/services/foundation-model-assessment/">Foundation Model Assessment</a> – Foundation models are popping up everywhere – do you need one for your proprietary image dataset? Get a clear perspective on whether you can benefit from a domain-specific foundation model.</p>]]>
      </description>
      <content:encoded>
        <![CDATA[<p>Using biological intelligence, human intelligence, and artificial intelligence, the company in the spotlight today aims to demystify health, make science accessible, and honor the biochemical individuality of every human.</p><p>Today on Impact AI, I am joined by the founding CTO and Head of Discovery AI at Viome, Guru Banavar! He is here to talk all about AI and the human microbiome. As you tune in, you’ll hear about Guru’s background and what led to the creation of Viome, including what they do and why their work is crucial to chronic disease. He unpacks their use of machine learning to turn RNA data into insights for their customers, the challenges they face in training models for the work they do, and Guru sheds light on the early steps of their process for planning and developing new machine-learning products or features.  Be sure not to miss out on this insightful conversation about how Guru and the team at Viome are working to decode the human microbiome.</p><p><br></p><p><strong>Key Points:</strong></p><ul><li>Learn about Guru’s background and what led to the creation of Viome.</li><li>What Viome does and why it’s important for chronic disease.</li><li>Using machine learning to turn RNA data into insights for customers at Viome.</li><li>Guru highlights the challenges they face in training models based off of the work with RNA data and the large data set they’ve collected from customers. </li><li>He unpacks the early steps in the process of planning and developing a new machine-learning product or feature.</li><li>We talk about technological advancements that made it possible to build their technology. 
</li><li>Guru’s advice to other leaders of AI-powered startups.</li><li>His thoughts on the impact of Viome in the next 3-5 years.</li></ul><p><br></p><p><strong>Quotes:</strong></p><p>“At some point in time, I decided that the impact that I wanted to make in the field of computational biology, life sciences, and healthcare could be done only if I joined a few of my friends from the broader community, and started a new company — [Viome].” — Guru Banavar</p><p><br></p><p>“I am one of those AI people who believes that you first focus on the problem, and you bring all of the tools you need to solve the problem. AI, to me, is not just one thing, like the latest buzzword. For me, AI is an ML, a set of tools, and you take the right tool for the right problem.” — Guru Banavar</p><p><br></p><p>“One of our core intellectual property elements is the meta-transcriptomic laboratory technology, which essentially, isolates, detects, and processes what we call the informative RNA molecules in any given sample. 
That required a number of sort of biochemistry-level technology breakthroughs.” — Guru Banavar</p><p><br></p><p>“I would advise other leaders of AI-powered startups to be very careful about how you pick your solution toolset, based upon the problem that you want to solve.” — Guru Banavar</p><p><br></p><p><strong>Links:</strong></p><p><a href="https://www.linkedin.com/in/banavar/">Guruduth Banavar on LinkedIn</a></p><p><a href="https://twitter.com/banavar">Guruduth Banavar on X</a></p><p><a href="https://www.viome.com/">Viome</a></p><p><a href="https://www.viome.com/blog">Viome Blog</a></p><p><br><strong>Resources for Computer Vision Teams:</strong></p><p><a href="https://www.linkedin.com/in/hdcouture/">LinkedIn</a> – Connect with Heather.</p><p><a href="https://pixelscientia.com/newsletter/">Computer Vision Insights Newsletter</a> – A biweekly newsletter to help bring the latest machine learning and computer vision research to applications in people and planetary health.</p><p><a href="http://pixelscientia.com/services/strategy-session/">Computer Vision Strategy Session</a> – Not sure how to advance your computer vision project? Get unstuck with a clear set of next steps. Schedule a 1 hour strategy session now to advance your project.</p><p><a href="https://pixelscientia.com/services/foundation-model-assessment/">Foundation Model Assessment</a> – Foundation models are popping up everywhere – do you need one for your proprietary image dataset? Get a clear perspective on whether you can benefit from a domain-specific foundation model.</p>]]>
      </content:encoded>
      <pubDate>Mon, 05 Feb 2024 06:00:00 -0500</pubDate>
      <author>Heather D. Couture</author>
      <enclosure url="https://2.gum.fm/op3.dev/e/pdcn.co/e/pdst.fm/e/dts.podtrac.com/redirect.mp3/media.transistor.fm/5f719ae0/eeae668c.mp3" length="36992652" type="audio/mpeg"/>
      <itunes:author>Heather D. Couture</itunes:author>
      <itunes:image href="https://img.transistor.fm/ij5CSsrQyOrH786iI-X3izg2vj9d2vcZvwmLnKPl-5w/rs:fill:0:0:1/w:1400/h:1400/q:60/mb:500000/aHR0cHM6Ly9pbWct/dXBsb2FkLXByb2R1/Y3Rpb24udHJhbnNp/c3Rvci5mbS9lcGlz/b2RlLzE2NzYwNDUv/MTcwNDU2OTExMi1h/cnR3b3JrLmpwZw.jpg"/>
      <itunes:duration>2303</itunes:duration>
      <itunes:summary>
        <![CDATA[<p>Using biological intelligence, human intelligence, and artificial intelligence, the company in the spotlight today aims to demystify health, make science accessible, and honor the biochemical individuality of every human.</p><p>Today on Impact AI, I am joined by the founding CTO and Head of Discovery AI at Viome, Guru Banavar! He is here to talk all about AI and the human microbiome. As you tune in, you’ll hear about Guru’s background and what led to the creation of Viome, including what they do and why their work is crucial to chronic disease. He unpacks their use of machine learning to turn RNA data into insights for their customers, the challenges they face in training models for the work they do, and Guru sheds light on the early steps of their process for planning and developing new machine-learning products or features.  Be sure not to miss out on this insightful conversation about how Guru and the team at Viome are working to decode the human microbiome.</p><p><br></p><p><strong>Key Points:</strong></p><ul><li>Learn about Guru’s background and what led to the creation of Viome.</li><li>What Viome does and why it’s important for chronic disease.</li><li>Using machine learning to turn RNA data into insights for customers at Viome.</li><li>Guru highlights the challenges they face in training models based off of the work with RNA data and the large data set they’ve collected from customers. </li><li>He unpacks the early steps in the process of planning and developing a new machine-learning product or feature.</li><li>We talk about technological advancements that made it possible to build their technology. 
</li><li>Guru’s advice to other leaders of AI-powered startups.</li><li>His thoughts on the impact of Viome in the next 3-5 years.</li></ul><p><br></p><p><strong>Quotes:</strong></p><p>“At some point in time, I decided that the impact that I wanted to make in the field of computational biology, life sciences, and healthcare could be done only if I joined a few of my friends from the broader community, and started a new company — [Viome].” — Guru Banavar</p><p><br></p><p>“I am one of those AI people who believes that you first focus on the problem, and you bring all of the tools you need to solve the problem. AI, to me, is not just one thing, like the latest buzzword. For me, AI is an ML, a set of tools, and you take the right tool for the right problem.” — Guru Banavar</p><p><br></p><p>“One of our core intellectual property elements is the meta-transcriptomic laboratory technology, which essentially, isolates, detects, and processes what we call the informative RNA molecules in any given sample. 
That required a number of sort of biochemistry-level technology breakthroughs.” — Guru Banavar</p><p><br></p><p>“I would advise other leaders of AI-powered startups to be very careful about how you pick your solution toolset, based upon the problem that you want to solve.” — Guru Banavar</p><p><br></p><p><strong>Links:</strong></p><p><a href="https://www.linkedin.com/in/banavar/">Guruduth Banavar on LinkedIn</a></p><p><a href="https://twitter.com/banavar">Guruduth Banavar on X</a></p><p><a href="https://www.viome.com/">Viome</a></p><p><a href="https://www.viome.com/blog">Viome Blog</a></p><p><br><strong>Resources for Computer Vision Teams:</strong></p><p><a href="https://www.linkedin.com/in/hdcouture/">LinkedIn</a> – Connect with Heather.</p><p><a href="https://pixelscientia.com/newsletter/">Computer Vision Insights Newsletter</a> – A biweekly newsletter to help bring the latest machine learning and computer vision research to applications in people and planetary health.</p><p><a href="http://pixelscientia.com/services/strategy-session/">Computer Vision Strategy Session</a> – Not sure how to advance your computer vision project? Get unstuck with a clear set of next steps. Schedule a 1 hour strategy session now to advance your project.</p><p><a href="https://pixelscientia.com/services/foundation-model-assessment/">Foundation Model Assessment</a> – Foundation models are popping up everywhere – do you need one for your proprietary image dataset? Get a clear perspective on whether you can benefit from a domain-specific foundation model.</p>]]>
      </itunes:summary>
      <itunes:keywords>machine learning, healthcare, gut microbiome, chronic disease</itunes:keywords>
      <itunes:explicit>No</itunes:explicit>
      <podcast:transcript url="https://share.transistor.fm/s/5f719ae0/transcript.txt" type="text/plain"/>
    </item>
    <item>
      <title>Optimizing Shipping with Konstantinos Kyriakopoulos from DeepSea</title>
      <itunes:episode>67</itunes:episode>
      <podcast:episode>67</podcast:episode>
      <itunes:title>Optimizing Shipping with Konstantinos Kyriakopoulos from DeepSea</itunes:title>
      <itunes:episodeType>full</itunes:episodeType>
      <guid isPermaLink="false">85621049-924f-405a-993f-d3dda1fa930b</guid>
      <link>https://pixelscientia.com/podcast/optimizing-shipping-with-konstantinos-kyriakopoulos-from-deepsea/</link>
      <description>
        <![CDATA[<p>In this episode, I sit down with Konstantinos Kyriakopoulos, CEO of DeepSea, to discuss the transformative world of AI-powered shipping optimization. DeepSea focuses on enhancing vessel performance, fuel efficiency, and overall logistics management in the shipping and logistics industry. Konstantinos has been a key figure in advocating for digitalization in the maritime sector, pushing for technologies to streamline processes, cut costs, and reduce environmental impact.</p><p>In our conversation, Konstantinos shares the captivating journey behind DeepSea's inception, revealing how its AI-driven solutions emerged from a desire to revolutionize the shipping industry's efficiency and environmental impact. We explore the intricate use of machine learning to predict fuel consumption, optimize vessel operations, and navigate the shift toward decarbonization.</p><p>Gain insights into the intricacies of data architecture, the critical role of scalability, measuring impact, the future vision of the company, and much more. Don't miss out on discovering the cutting-edge applications of AI that are steering the shipping industry toward a more sustainable future with Konstantinos Kyriakopoulos. Tune in now!</p><p><br></p><p><strong>Key Points:</strong></p><ul><li>Background about Konstantinos and DeepSea's inception.</li><li>How AI is reshaping shipping efficiency and vessel operations.</li><li>The role of DeepSea in the shipping industry and mitigating climate change.</li><li>Insights into the challenges and hurdles of an evolving shipping industry.</li><li>How DeepSea leverages AI, inputs into the model, and the overall aim.</li><li>Approaches the company implements to ensure the integrity of its products.</li><li>Why the explainability of machine learning models is critical. 
</li><li>He shares DeepSea’s approach to model validation.</li><li>Measuring impact: CO2 reduction and cost savings for clients.</li><li>Konstantinos offers valuable advice for leaders of AI-powered startups.</li><li>What the company has planned for the future.</li></ul><p><br></p><p><strong>Quotes:</strong></p><p>“If you really want to create impact, it’s not enough to just show people what’s happening and give them analytics, but you also have to, in some way, produce a tangible ROI.” — Konstantinos Kyriakopoulos</p><p><br></p><p>“The most important thing is to evaluate performance, so to make sure that the proof of performance is constantly being tested and you have good benchmarks and analytics.” — Konstantinos Kyriakopoulos</p><p><br></p><p>“It’s really important to also be able to check internally what is going on but also how the customer wants to see what’s created.” — Konstantinos Kyriakopoulos</p><p><br></p><p>“For us, the impact is actually very straightforward. It’s dollars and the metric tonnes of CO2.” — Konstantinos Kyriakopoulos</p><p><br></p><p>“I think what I always say when people talk to me about starting an AI company is to focus on your data architecture early.” — Konstantinos Kyriakopoulos</p><p><br></p><p><strong>Links:</strong></p><p><a href="https://meaningful.business/team/konstantinos-kyriakopoulos/">Konstantinos Kyriakopoulos</a></p><p><a href="https://deepsea.ai">DeepSea</a></p><p><a href="https://www.linkedin.com/company/deepsea-technologies/">DeepSea on LinkedIn</a></p><p><br><strong>Resources for Computer Vision Teams:</strong></p><p><a href="https://www.linkedin.com/in/hdcouture/">LinkedIn</a> – Connect with Heather.</p><p><a href="https://pixelscientia.com/newsletter/">Computer Vision Insights Newsletter</a> – A biweekly newsletter to help bring the latest machine learning and computer vision research to applications in people and planetary health.</p><p><a href="http://pixelscientia.com/services/strategy-session/">Computer 
Vision Strategy Session</a> – Not sure how to advance your computer vision project? Get unstuck with a clear set of next steps. Schedule a 1 hour strategy session now to advance your project.</p><p><a href="https://pixelscientia.com/services/foundation-model-assessment/">Foundation Model Assessment</a> – Foundation models are popping up everywhere – do you need one for your proprietary image dataset? Get a clear perspective on whether you can benefit from a domain-specific foundation model.</p>]]>
      </description>
      <content:encoded>
        <![CDATA[<p>In this episode, I sit down with Konstantinos Kyriakopoulos, CEO of DeepSea, to discuss the transformative world of AI-powered shipping optimization. DeepSea focuses on enhancing vessel performance, fuel efficiency, and overall logistics management in the shipping and logistics industry. Konstantinos has been a key figure in advocating for digitalization in the maritime sector, pushing for technologies to streamline processes, cut costs, and reduce environmental impact.</p><p>In our conversation, Konstantinos shares the captivating journey behind DeepSea's inception, revealing how its AI-driven solutions emerged from a desire to revolutionize the shipping industry's efficiency and environmental impact. We explore the intricate use of machine learning to predict fuel consumption, optimize vessel operations, and navigate the shift toward decarbonization.</p><p>Gain insights into the intricacies of data architecture, the critical role of scalability, measuring impact, the future vision of the company, and much more. Don't miss out on discovering the cutting-edge applications of AI that are steering the shipping industry toward a more sustainable future with Konstantinos Kyriakopoulos. Tune in now!</p><p><br></p><p><strong>Key Points:</strong></p><ul><li>Background about Konstantinos and DeepSea's inception.</li><li>How AI is reshaping shipping efficiency and vessel operations.</li><li>The role of DeepSea in the shipping industry and mitigating climate change.</li><li>Insights into the challenges and hurdles of an evolving shipping industry.</li><li>How DeepSea leverages AI, inputs into the model, and the overall aim.</li><li>Approaches the company implements to ensure the integrity of its products.</li><li>Why the explainability of machine learning models is critical. 
</li><li>He shares DeepSea’s approach to model validation.</li><li>Measuring impact: CO2 reduction and cost savings for clients.</li><li>Konstantinos offers valuable advice for leaders of AI-powered startups.</li><li>What the company has planned for the future.</li></ul><p><br></p><p><strong>Quotes:</strong></p><p>“If you really want to create impact, it’s not enough to just show people what’s happening and give them analytics, but you also have to, in some way, produce a tangible ROI.” — Konstantinos Kyriakopoulos</p><p><br></p><p>“The most important thing is to evaluate performance, so to make sure that the proof of performance is constantly being tested and you have good benchmarks and analytics.” — Konstantinos Kyriakopoulos</p><p><br></p><p>“It’s really important to also be able to check internally what is going on but also how the customer wants to see what’s created.” — Konstantinos Kyriakopoulos</p><p><br></p><p>“For us, the impact is actually very straightforward. It’s dollars and the metric tonnes of CO2.” — Konstantinos Kyriakopoulos</p><p><br></p><p>“I think what I always say when people talk to me about starting an AI company is to focus on your data architecture early.” — Konstantinos Kyriakopoulos</p><p><br></p><p><strong>Links:</strong></p><p><a href="https://meaningful.business/team/konstantinos-kyriakopoulos/">Konstantinos Kyriakopoulos</a></p><p><a href="https://deepsea.ai">DeepSea</a></p><p><a href="https://www.linkedin.com/company/deepsea-technologies/">DeepSea on LinkedIn</a></p><p><br><strong>Resources for Computer Vision Teams:</strong></p><p><a href="https://www.linkedin.com/in/hdcouture/">LinkedIn</a> – Connect with Heather.</p><p><a href="https://pixelscientia.com/newsletter/">Computer Vision Insights Newsletter</a> – A biweekly newsletter to help bring the latest machine learning and computer vision research to applications in people and planetary health.</p><p><a href="http://pixelscientia.com/services/strategy-session/">Computer 
Vision Strategy Session</a> – Not sure how to advance your computer vision project? Get unstuck with a clear set of next steps. Schedule a 1 hour strategy session now to advance your project.</p><p><a href="https://pixelscientia.com/services/foundation-model-assessment/">Foundation Model Assessment</a> – Foundation models are popping up everywhere – do you need one for your proprietary image dataset? Get a clear perspective on whether you can benefit from a domain-specific foundation model.</p>]]>
      </content:encoded>
      <pubDate>Mon, 29 Jan 2024 06:00:00 -0500</pubDate>
      <author>Heather D. Couture</author>
      <enclosure url="https://2.gum.fm/op3.dev/e/pdcn.co/e/pdst.fm/e/dts.podtrac.com/redirect.mp3/media.transistor.fm/16f32616/b0df7ea1.mp3" length="27876404" type="audio/mpeg"/>
      <itunes:author>Heather D. Couture</itunes:author>
      <itunes:image href="https://img.transistor.fm/zEOVOyb0SPokrn4jlK0bMrGjxDzbQYZN858GauBicEA/rs:fill:0:0:1/w:1400/h:1400/q:60/mb:500000/aHR0cHM6Ly9pbWct/dXBsb2FkLXByb2R1/Y3Rpb24udHJhbnNp/c3Rvci5mbS9lcGlz/b2RlLzE2NzYwNDQv/MTcwNDU2ODk2Mi1h/cnR3b3JrLmpwZw.jpg"/>
      <itunes:duration>1157</itunes:duration>
      <itunes:summary>
        <![CDATA[<p>In this episode, I sit down with Konstantinos Kyriakopoulos, CEO of DeepSea, to discuss the transformative world of AI-powered shipping optimization. DeepSea focuses on enhancing vessel performance, fuel efficiency, and overall logistics management in the shipping and logistics industry. Konstantinos has been a key figure in advocating for digitalization in the maritime sector, pushing for technologies to streamline processes, cut costs, and reduce environmental impact.</p><p>In our conversation, Konstantinos shares the captivating journey behind DeepSea's inception, revealing how its AI-driven solutions emerged from a desire to revolutionize the shipping industry's efficiency and environmental impact. We explore the intricate use of machine learning to predict fuel consumption, optimize vessel operations, and navigate the shift toward decarbonization.</p><p>Gain insights into the intricacies of data architecture, the critical role of scalability, measuring impact, the future vision of the company, and much more. Don't miss out on discovering the cutting-edge applications of AI that are steering the shipping industry toward a more sustainable future with Konstantinos Kyriakopoulos. Tune in now!</p><p><br></p><p><strong>Key Points:</strong></p><ul><li>Background about Konstantinos and DeepSea's inception.</li><li>How AI is reshaping shipping efficiency and vessel operations.</li><li>The role of DeepSea in the shipping industry and mitigating climate change.</li><li>Insights into the challenges and hurdles of an evolving shipping industry.</li><li>How DeepSea leverages AI, inputs into the model, and the overall aim.</li><li>Approaches the company implements to ensure the integrity of its products.</li><li>Why the explainability of machine learning models is critical. 
</li><li>He shares DeepSea’s approach to model validation.</li><li>Measuring impact: CO2 reduction and cost savings for clients.</li><li>Konstantinos offers valuable advice for leaders of AI-powered startups.</li><li>What the company has planned for the future.</li></ul><p><br></p><p><strong>Quotes:</strong></p><p>“If you really want to create impact, it’s not enough to just show people what’s happening and give them analytics, but you also have to, in some way, produce a tangible ROI.” — Konstantinos Kyriakopoulos</p><p><br></p><p>“The most important thing is to evaluate performance, so to make sure that the proof of performance is constantly being tested and you have good benchmarks and analytics.” — Konstantinos Kyriakopoulos</p><p><br></p><p>“It’s really important to also be able to check internally what is going on but also how the customer wants to see what’s created.” — Konstantinos Kyriakopoulos</p><p><br></p><p>“For us, the impact is actually very straightforward. It’s dollars and the metric tonnes of CO2.” — Konstantinos Kyriakopoulos</p><p><br></p><p>“I think what I always say when people talk to me about starting an AI company is to focus on your data architecture early.” — Konstantinos Kyriakopoulos</p><p><br></p><p><strong>Links:</strong></p><p><a href="https://meaningful.business/team/konstantinos-kyriakopoulos/">Konstantinos Kyriakopoulos</a></p><p><a href="https://deepsea.ai">DeepSea</a></p><p><a href="https://www.linkedin.com/company/deepsea-technologies/">DeepSea on LinkedIn</a></p><p><br><strong>Resources for Computer Vision Teams:</strong></p><p><a href="https://www.linkedin.com/in/hdcouture/">LinkedIn</a> – Connect with Heather.</p><p><a href="https://pixelscientia.com/newsletter/">Computer Vision Insights Newsletter</a> – A biweekly newsletter to help bring the latest machine learning and computer vision research to applications in people and planetary health.</p><p><a href="http://pixelscientia.com/services/strategy-session/">Computer 
Vision Strategy Session</a> – Not sure how to advance your computer vision project? Get unstuck with a clear set of next steps. Schedule a 1 hour strategy session now to advance your project.</p><p><a href="https://pixelscientia.com/services/foundation-model-assessment/">Foundation Model Assessment</a> – Foundation models are popping up everywhere – do you need one for your proprietary image dataset? Get a clear perspective on whether you can benefit from a domain-specific foundation model.</p>]]>
      </itunes:summary>
      <itunes:keywords>machine learning, emissions, shipping</itunes:keywords>
      <itunes:explicit>No</itunes:explicit>
      <podcast:transcript url="https://share.transistor.fm/s/16f32616/transcript.txt" type="text/plain"/>
    </item>
    <item>
      <title>Enhancing Sleep Care with Sam Rusk from EnsoData</title>
      <itunes:episode>66</itunes:episode>
      <podcast:episode>66</podcast:episode>
      <itunes:title>Enhancing Sleep Care with Sam Rusk from EnsoData</itunes:title>
      <itunes:episodeType>full</itunes:episodeType>
      <guid isPermaLink="false">c48a166f-bb69-4010-bafc-edf37d5512d2</guid>
      <link>https://pixelscientia.com/podcast/enhancing-sleep-care-with-sam-rusk-from-ensodata/</link>
      <description>
        <![CDATA[<p>AI and machine learning have had a huge impact on the healthcare industry, but there are still plenty of advances to be made. Joining me today is Sam Rusk, Co-founder and CAIO of EnsoData, to talk about how their team is using machine learning to optimize sleep. Tuning in, you’ll learn about the founding of EnsoData, their implementation of ML, and the important role they play in the healthcare sector. We discuss the primary challenges of working with and training models on waveform data, EnsoData’s diagnostic processes, and how they use ML to process collected waveforms and identify therapy opportunities. Sam also shares his thoughts on how ML has developed since they first founded the company nine years ago, his advice for other leaders of AI-powered startups, and what his hopes are for EnsoData in the next five years. To learn how EnsoData is making waves in healthcare, be sure to listen in today!</p><p><br></p><p><strong>Key Points:</strong></p><ul><li>Sam’s engineering and entrepreneurship background and EnsoData’s origin story.</li><li>What EnsoData does and why it’s important for healthcare.</li><li>Using ML to process collected waveforms and identify therapy opportunities.</li><li>Input and output models EnsoData uses to navigate the noise of tricky signal types.</li><li>Examples of what they are trying to predict with these models.</li><li>Diagnostic processes used in sleep medicine and the role of EnsoData.</li><li>Major challenges of working with and training models on waveform data.</li><li>Different approaches EnsoData has implemented to tackle generalizability.</li><li>Ways that the role of ML has evolved since EnsoData was founded nine years ago.</li><li>Insight into their team’s process for developing new products and features.</li><li>EnsoData’s place in the clinical workflow and how they assist doctors and patients.</li><li>Sam’s advice for other leaders of AI-powered startups.</li><li>What’s next for EnsoData and where you can 
go to learn more!</li></ul><p><br></p><p><strong>Quotes:</strong></p><p>“We have a pretty mature process for taking feature ideas and moving them from the top of the funnel on product management all the way to testing and releasing those.” — Sam Rusk</p><p><br></p><p>“We spend a lot of our time solving not necessarily the machine learning performance side of the problem, but more ‘how do we get this into the clinicians’ hands in a way that makes sense for everyone.’” — Sam Rusk</p><p><br></p><p>“While we want to deliver products that change the game, we [also] invest heavily in research, and we are active in the community, publishing and engaging in the research community in sleep.” — Sam Rusk</p><p><strong>Links:<br></strong><a href="https://www.linkedin.com/in/sam-rusk-38626a56/">Sam Rusk on LinkedIn</a><br><a href="https://www.ensodata.com/">EnsoData</a></p><p><strong>Resources for Computer Vision Teams:</strong></p><p><a href="https://www.linkedin.com/in/hdcouture/">LinkedIn</a> – Connect with Heather.</p><p><a href="https://pixelscientia.com/newsletter/">Computer Vision Insights Newsletter</a> – A biweekly newsletter to help bring the latest machine learning and computer vision research to applications in people and planetary health.</p><p><a href="http://pixelscientia.com/services/strategy-session/">Computer Vision Strategy Session</a> – Not sure how to advance your computer vision project? Get unstuck with a clear set of next steps. Schedule a 1 hour strategy session now to advance your project.</p><p><a href="https://pixelscientia.com/services/custom-vision-model-assessment/">Custom Vision Model Assessment</a> – Foundation models are popping up everywhere – do you need one for your proprietary image dataset? Get a clear perspective on whether you can benefit from a domain-specific foundation model.</p>]]>
      </description>
      <content:encoded>
        <![CDATA[<p>AI and machine learning have had a huge impact on the healthcare industry, but there are still plenty of advances to be made. Joining me today is Sam Rusk, Co-founder and CAIO of EnsoData, to talk about how their team is using machine learning to optimize sleep. Tuning in, you’ll learn about the founding of EnsoData, their implementation of ML, and the important role they play in the healthcare sector. We discuss the primary challenges of working with and training models on waveform data, EnsoData’s diagnostic processes, and how they use ML to process collected waveforms and identify therapy opportunities. Sam also shares his thoughts on how ML has developed since they first founded the company nine years ago, his advice for other leaders of AI-powered startups, and what his hopes are for EnsoData in the next five years. To learn how EnsoData is making waves in healthcare, be sure to listen in today!</p><p><br></p><p><strong>Key Points:</strong></p><ul><li>Sam’s engineering and entrepreneurship background and EnsoData’s origin story.</li><li>What EnsoData does and why it’s important for healthcare.</li><li>Using ML to process collected waveforms and identify therapy opportunities.</li><li>Input and output models EnsoData uses to navigate the noise of tricky signal types.</li><li>Examples of what they are trying to predict with these models.</li><li>Diagnostic processes used in sleep medicine and the role of EnsoData.</li><li>Major challenges of working with and training models on waveform data.</li><li>Different approaches EnsoData has implemented to tackle generalizability.</li><li>Ways that the role of ML has evolved since EnsoData was founded nine years ago.</li><li>Insight into their team’s process for developing new products and features.</li><li>EnsoData’s place in the clinical workflow and how they assist doctors and patients.</li><li>Sam’s advice for other leaders of AI-powered startups.</li><li>What’s next for EnsoData and where you can 
go to learn more!</li></ul><p><br></p><p><strong>Quotes:</strong></p><p>“We have a pretty mature process for taking feature ideas and moving them from the top of the funnel on product management all the way to testing and releasing those.” — Sam Rusk</p><p><br></p><p>“We spend a lot of our time solving not necessarily the machine learning performance side of the problem, but more ‘how do we get this into the clinicians’ hands in a way that makes sense for everyone.’” — Sam Rusk</p><p><br></p><p>“While we want to deliver products that change the game, we [also] invest heavily in research, and we are active in the community, publishing and engaging in the research community in sleep.” — Sam Rusk</p><p><strong>Links:<br></strong><a href="https://www.linkedin.com/in/sam-rusk-38626a56/">Sam Rusk on LinkedIn</a><br><a href="https://www.ensodata.com/">EnsoData</a></p><p><strong>Resources for Computer Vision Teams:</strong></p><p><a href="https://www.linkedin.com/in/hdcouture/">LinkedIn</a> – Connect with Heather.</p><p><a href="https://pixelscientia.com/newsletter/">Computer Vision Insights Newsletter</a> – A biweekly newsletter to help bring the latest machine learning and computer vision research to applications in people and planetary health.</p><p><a href="http://pixelscientia.com/services/strategy-session/">Computer Vision Strategy Session</a> – Not sure how to advance your computer vision project? Get unstuck with a clear set of next steps. Schedule a 1 hour strategy session now to advance your project.</p><p><a href="https://pixelscientia.com/services/custom-vision-model-assessment/">Custom Vision Model Assessment</a> – Foundation models are popping up everywhere – do you need one for your proprietary image dataset? Get a clear perspective on whether you can benefit from a domain-specific foundation model.</p>]]>
      </content:encoded>
      <pubDate>Mon, 22 Jan 2024 06:00:00 -0500</pubDate>
      <author>Heather D. Couture</author>
      <enclosure url="https://2.gum.fm/op3.dev/e/pdcn.co/e/pdst.fm/e/dts.podtrac.com/redirect.mp3/media.transistor.fm/59da413e/e06a5a09.mp3" length="22303637" type="audio/mpeg"/>
      <itunes:author>Heather D. Couture</itunes:author>
      <itunes:image href="https://img.transistor.fm/mNllq41LRVFBYo_5BrLgR_jPOApO2Kl2L-TMkEqwNQg/rs:fill:0:0:1/w:1400/h:1400/q:60/mb:500000/aHR0cHM6Ly9pbWct/dXBsb2FkLXByb2R1/Y3Rpb24udHJhbnNp/c3Rvci5mbS9lcGlz/b2RlLzE2NTM0ODAv/MTcwMzEwNzQ2NC1h/cnR3b3JrLmpwZw.jpg"/>
      <itunes:duration>924</itunes:duration>
      <itunes:summary>
        <![CDATA[<p>AI and machine learning have had a huge impact on the healthcare industry, but there are still plenty of advances to be made. Joining me today is Sam Rusk, Co-founder and CAIO of EnsoData, to talk about how their team is using machine learning to optimize sleep. Tuning in, you’ll learn about the founding of EnsoData, their implementation of ML, and the important role they play in the healthcare sector. We discuss the primary challenges of working with and training models on waveform data, EnsoData’s diagnostic processes, and how they use ML to process collected waveforms and identify therapy opportunities. Sam also shares his thoughts on how ML has developed since they first founded the company nine years ago, his advice for other leaders of AI-powered startups, and what his hopes are for EnsoData in the next five years. To learn how EnsoData is making waves in healthcare, be sure to listen in today!</p><p><br></p><p><strong>Key Points:</strong></p><ul><li>Sam’s engineering and entrepreneurship background and EnsoData’s origin story.</li><li>What EnsoData does and why it’s important for healthcare.</li><li>Using ML to process collected waveforms and identify therapy opportunities.</li><li>Input and output models EnsoData uses to navigate the noise of tricky signal types.</li><li>Examples of what they are trying to predict with these models.</li><li>Diagnostic processes used in sleep medicine and the role of EnsoData.</li><li>Major challenges of working with and training models on waveform data.</li><li>Different approaches EnsoData has implemented to tackle generalizability.</li><li>Ways that the role of ML has evolved since EnsoData was founded nine years ago.</li><li>Insight into their team’s process for developing new products and features.</li><li>EnsoData’s place in the clinical workflow and how they assist doctors and patients.</li><li>Sam’s advice for other leaders of AI-powered startups.</li><li>What’s next for EnsoData and where you can 
go to learn more!</li></ul><p><br></p><p><strong>Quotes:</strong></p><p>“We have a pretty mature process for taking feature ideas and moving them from the top of the funnel on product management all the way to testing and releasing those.” — Sam Rusk</p><p><br></p><p>“We spend a lot of our time solving not necessarily the machine learning performance side of the problem, but more ‘how do we get this into the clinicians’ hands in a way that makes sense for everyone.’” — Sam Rusk</p><p><br></p><p>“While we want to deliver products that change the game, we [also] invest heavily in research, and we are active in the community, publishing and engaging in the research community in sleep.” — Sam Rusk</p><p><strong>Links:<br></strong><a href="https://www.linkedin.com/in/sam-rusk-38626a56/">Sam Rusk on LinkedIn</a><br><a href="https://www.ensodata.com/">EnsoData</a></p><p><strong>Resources for Computer Vision Teams:</strong></p><p><a href="https://www.linkedin.com/in/hdcouture/">LinkedIn</a> – Connect with Heather.</p><p><a href="https://pixelscientia.com/newsletter/">Computer Vision Insights Newsletter</a> – A biweekly newsletter to help bring the latest machine learning and computer vision research to applications in people and planetary health.</p><p><a href="http://pixelscientia.com/services/strategy-session/">Computer Vision Strategy Session</a> – Not sure how to advance your computer vision project? Get unstuck with a clear set of next steps. Schedule a 1 hour strategy session now to advance your project.</p><p><a href="https://pixelscientia.com/services/custom-vision-model-assessment/">Custom Vision Model Assessment</a> – Foundation models are popping up everywhere – do you need one for your proprietary image dataset? Get a clear perspective on whether you can benefit from a domain-specific foundation model.</p>]]>
      </itunes:summary>
      <itunes:keywords>machine learning, sleep</itunes:keywords>
      <itunes:explicit>No</itunes:explicit>
      <podcast:transcript url="https://share.transistor.fm/s/59da413e/transcript.txt" type="text/plain"/>
    </item>
    <item>
      <title>Democratizing Data-Driven Agriculture with Ranveer Chandra from Microsoft Research</title>
      <itunes:episode>65</itunes:episode>
      <podcast:episode>65</podcast:episode>
      <itunes:title>Democratizing Data-Driven Agriculture with Ranveer Chandra from Microsoft Research</itunes:title>
      <itunes:episodeType>full</itunes:episodeType>
      <guid isPermaLink="false">a9dea663-7186-4222-a8e1-c36ad2280c7e</guid>
      <link>https://pixelscientia.com/podcast/democratizing-data-driven-agriculture-with-ranveer-chandra-from-microsoft-research/</link>
      <description>
        <![CDATA[<p>What if you were told that AI could improve agriculture, reduce climate change, and potentially solve global food insecurity? In this episode of Impact AI, I am joined by Ranveer Chandra from Microsoft Research to discuss his work in the world of agriculture. Tuning in, you’ll hear all about Ranveer’s career, how he got his agriculture idea picked up by Microsoft, data-driven agriculture, and more! We then delve into the data needed to achieve their goals before Ranveer discusses all the challenges they face when it comes to multimodal AI. Ranveer is very hopeful that machine learning can drastically improve agriculture. He tells me what new AI technologies he is most excited about, their potential impact on agriculture, and even shares advice for other leaders in AI. Finally, my guest warns us against the potential divide society can create if AI is not made accessible to all people. You don’t want to miss out on this informative and incredibly interesting episode so press play now!</p><p><br></p><p><strong>Key Points:</strong></p><ul><li>Introducing today’s guest, Ranveer Chandra.</li><li>A bit about Ranveer’s background and how he landed up at Microsoft Research. </li><li>How Microsoft got involved in agriculture. </li><li>Ranveer tells us about data-driven agriculture, what it means, and how he plans to achieve it. </li><li>The kinds of data they collect from farms in order to achieve these goals. </li><li>Challenges associated with multimodal AI.</li><li>How these technologies have been deployed so far.  </li><li>What new technology Ranveer is excited about in the world of machine learning.</li><li>Ranveer shares some advice for other leaders of AI-based products. </li><li>The potential impact of data-driven and AI technologies for agriculture in the future. </li><li>Ranveer warns us about the dangers of creating an AI-divide and what that would mean. 
</li></ul><p><br></p><p><strong>Quotes:</strong></p><p>“Technology could have a deep impact on agriculture. It could address the world's food problem; it could help improve livelihoods of a lot of smallholder farmers.” — Ranveer Chandra</p><p><br></p><p>“The key question is, how do you sustainably nourish the planet? How do you sustainably nourish the people in this world?” — Ranveer Chandra</p><p><br></p><p>“Microsoft is not an agriculture company. So we are not sending anything to farmers, but we are providing the tools on top of which you could build solutions for farmers, or partners, or customers build solutions and take the solutions to farmers.” — Ranveer Chandra</p><p><br></p><p>“We need to make data consumable, and generative AI has the suitability to make that data more consumable.” — Ranveer Chandra</p><p><br></p><p>“There are over 500 million smallholder farmers worldwide whose lives would benefit with artificial intelligence.” — Ranveer Chandra</p><p><br></p><p><strong>Links:</strong></p><p><a href="https://www.linkedin.com/in/ranveer-chandra-79bb9b/">Ranveer Chandra on LinkedIn</a></p><p><a href="https://twitter.com/RanveerChandra">Ranveer Chandra on X</a></p><p><a href="https://www.instagram.com/chandraranveer/">Ranveer Chandra on Instagram</a></p><p><a href="https://www.microsoft.com/en-us/research/people/ranveer/">Microsoft Research – Ranveer Chandra</a></p><p><strong>Resources for Computer Vision Teams:</strong></p><p><a href="https://www.linkedin.com/in/hdcouture/">LinkedIn</a> – Connect with Heather.</p><p><a href="https://pixelscientia.com/newsletter/">Computer Vision Insights Newsletter</a> – A biweekly newsletter to help bring the latest machine learning and computer vision research to applications in people and planetary health.</p><p><a href="http://pixelscientia.com/services/strategy-session/">Computer Vision Strategy Session</a> – Not sure how to advance your computer vision project? Get unstuck with a clear set of next steps. 
Schedule a 1 hour strategy session now to advance your project.</p><p><a href="https://pixelscientia.com/services/custom-vision-model-assessment/">Custom Vision Model Assessment</a> – Foundation models are popping up everywhere – do you need one for your proprietary image dataset? Get a clear perspective on whether you can benefit from a domain-specific foundation model.</p>]]>
      </description>
      <content:encoded>
        <![CDATA[<p>What if you were told that AI could improve agriculture, reduce climate change, and potentially solve global food insecurity? In this episode of Impact AI, I am joined by Ranveer Chandra from Microsoft Research to discuss his work in the world of agriculture. Tuning in, you’ll hear all about Ranveer’s career, how he got his agriculture idea picked up by Microsoft, data-driven agriculture, and more! We then delve into the data needed to achieve their goals before Ranveer discusses all the challenges they face when it comes to multimodal AI. Ranveer is very hopeful that machine learning can drastically improve agriculture. He tells me what new AI technologies he is most excited about, their potential impact on agriculture, and even shares advice for other leaders in AI. Finally, my guest warns us against the potential divide society can create if AI is not made accessible to all people. You don’t want to miss out on this informative and incredibly interesting episode so press play now!</p><p><br></p><p><strong>Key Points:</strong></p><ul><li>Introducing today’s guest, Ranveer Chandra.</li><li>A bit about Ranveer’s background and how he landed up at Microsoft Research. </li><li>How Microsoft got involved in agriculture. </li><li>Ranveer tells us about data-driven agriculture, what it means, and how he plans to achieve it. </li><li>The kinds of data they collect from farms in order to achieve these goals. </li><li>Challenges associated with multimodal AI.</li><li>How these technologies have been deployed so far.  </li><li>What new technology Ranveer is excited about in the world of machine learning.</li><li>Ranveer shares some advice for other leaders of AI-based products. </li><li>The potential impact of data-driven and AI technologies for agriculture in the future. </li><li>Ranveer warns us about the dangers of creating an AI-divide and what that would mean. 
</li></ul><p><br></p><p><strong>Quotes:</strong></p><p>“Technology could have a deep impact on agriculture. It could address the world's food problem; it could help improve livelihoods of a lot of smallholder farmers.” — Ranveer Chandra</p><p><br></p><p>“The key question is, how do you sustainably nourish the planet? How do you sustainably nourish the people in this world?” — Ranveer Chandra</p><p><br></p><p>“Microsoft is not an agriculture company. So we are not sending anything to farmers, but we are providing the tools on top of which you could build solutions for farmers, or partners, or customers build solutions and take the solutions to farmers.” — Ranveer Chandra</p><p><br></p><p>“We need to make data consumable, and generative AI has the suitability to make that data more consumable.” — Ranveer Chandra</p><p><br></p><p>“There are over 500 million smallholder farmers worldwide whose lives would benefit with artificial intelligence.” — Ranveer Chandra</p><p><br></p><p><strong>Links:</strong></p><p><a href="https://www.linkedin.com/in/ranveer-chandra-79bb9b/">Ranveer Chandra on LinkedIn</a></p><p><a href="https://twitter.com/RanveerChandra">Ranveer Chandra on X</a></p><p><a href="https://www.instagram.com/chandraranveer/">Ranveer Chandra on Instagram</a></p><p><a href="https://www.microsoft.com/en-us/research/people/ranveer/">Microsoft Research – Ranveer Chandra</a></p><p><strong>Resources for Computer Vision Teams:</strong></p><p><a href="https://www.linkedin.com/in/hdcouture/">LinkedIn</a> – Connect with Heather.</p><p><a href="https://pixelscientia.com/newsletter/">Computer Vision Insights Newsletter</a> – A biweekly newsletter to help bring the latest machine learning and computer vision research to applications in people and planetary health.</p><p><a href="http://pixelscientia.com/services/strategy-session/">Computer Vision Strategy Session</a> – Not sure how to advance your computer vision project? Get unstuck with a clear set of next steps. 
Schedule a 1 hour strategy session now to advance your project.</p><p><a href="https://pixelscientia.com/services/custom-vision-model-assessment/">Custom Vision Model Assessment</a> – Foundation models are popping up everywhere – do you need one for your proprietary image dataset? Get a clear perspective on whether you can benefit from a domain-specific foundation model.</p>]]>
      </content:encoded>
      <pubDate>Mon, 15 Jan 2024 06:00:00 -0500</pubDate>
      <author>Heather D. Couture</author>
      <enclosure url="https://2.gum.fm/op3.dev/e/pdcn.co/e/pdst.fm/e/dts.podtrac.com/redirect.mp3/media.transistor.fm/efd334e9/02236d43.mp3" length="40065974" type="audio/mpeg"/>
      <itunes:author>Heather D. Couture</itunes:author>
      <itunes:image href="https://img.transistor.fm/ajAljFKHJz8-Wcf5k1PkYNULJa5Ip76JcfEF71dLUOk/rs:fill:0:0:1/w:1400/h:1400/q:60/mb:500000/aHR0cHM6Ly9pbWct/dXBsb2FkLXByb2R1/Y3Rpb24udHJhbnNp/c3Rvci5mbS9lcGlz/b2RlLzE2NTM0NzUv/MTcwMzEwNzI3OS1h/cnR3b3JrLmpwZw.jpg"/>
      <itunes:duration>1662</itunes:duration>
      <itunes:summary>
        <![CDATA[<p>What if you were told that AI could improve agriculture, reduce climate change, and potentially solve global food insecurity? In this episode of Impact AI, I am joined by Ranveer Chandra from Microsoft Research to discuss his work in the world of agriculture. Tuning in, you’ll hear all about Ranveer’s career, how he got his agriculture idea picked up by Microsoft, data-driven agriculture, and more! We then delve into the data needed to achieve their goals before Ranveer discusses all the challenges they face when it comes to multimodal AI. Ranveer is very hopeful that machine learning can drastically improve agriculture. He tells me what new AI technologies he is most excited about, their potential impact on agriculture, and even shares advice for other leaders in AI. Finally, my guest warns us against the potential divide society can create if AI is not made accessible to all people. You don’t want to miss out on this informative and incredibly interesting episode so press play now!</p><p><br></p><p><strong>Key Points:</strong></p><ul><li>Introducing today’s guest, Ranveer Chandra.</li><li>A bit about Ranveer’s background and how he landed up at Microsoft Research. </li><li>How Microsoft got involved in agriculture. </li><li>Ranveer tells us about data-driven agriculture, what it means, and how he plans to achieve it. </li><li>The kinds of data they collect from farms in order to achieve these goals. </li><li>Challenges associated with multimodal AI.</li><li>How these technologies have been deployed so far.  </li><li>What new technology Ranveer is excited about in the world of machine learning.</li><li>Ranveer shares some advice for other leaders of AI-based products. </li><li>The potential impact of data-driven and AI technologies for agriculture in the future. </li><li>Ranveer warns us about the dangers of creating an AI-divide and what that would mean. 
</li></ul><p><br></p><p><strong>Quotes:</strong></p><p>“Technology could have a deep impact on agriculture. It could address the world's food problem; it could help improve livelihoods of a lot of smallholder farmers.” — Ranveer Chandra</p><p><br></p><p>“The key question is, how do you sustainably nourish the planet? How do you sustainably nourish the people in this world?” — Ranveer Chandra</p><p><br></p><p>“Microsoft is not an agriculture company. So we are not sending anything to farmers, but we are providing the tools on top of which you could build solutions for farmers, or partners, or customers build solutions and take the solutions to farmers.” — Ranveer Chandra</p><p><br></p><p>“We need to make data consumable, and generative AI has the suitability to make that data more consumable.” — Ranveer Chandra</p><p><br></p><p>“There are over 500 million smallholder farmers worldwide whose lives would benefit with artificial intelligence.” — Ranveer Chandra</p><p><br></p><p><strong>Links:</strong></p><p><a href="https://www.linkedin.com/in/ranveer-chandra-79bb9b/">Ranveer Chandra on LinkedIn</a></p><p><a href="https://twitter.com/RanveerChandra">Ranveer Chandra on X</a></p><p><a href="https://www.instagram.com/chandraranveer/">Ranveer Chandra on Instagram</a></p><p><a href="https://www.microsoft.com/en-us/research/people/ranveer/">Microsoft Research – Ranveer Chandra</a></p><p><strong>Resources for Computer Vision Teams:</strong></p><p><a href="https://www.linkedin.com/in/hdcouture/">LinkedIn</a> – Connect with Heather.</p><p><a href="https://pixelscientia.com/newsletter/">Computer Vision Insights Newsletter</a> – A biweekly newsletter to help bring the latest machine learning and computer vision research to applications in people and planetary health.</p><p><a href="http://pixelscientia.com/services/strategy-session/">Computer Vision Strategy Session</a> – Not sure how to advance your computer vision project? Get unstuck with a clear set of next steps. 
Schedule a 1 hour strategy session now to advance your project.</p><p><a href="https://pixelscientia.com/services/custom-vision-model-assessment/">Custom Vision Model Assessment</a> – Foundation models are popping up everywhere – do you need one for your proprietary image dataset? Get a clear perspective on whether you can benefit from a domain-specific foundation model.</p>]]>
      </itunes:summary>
      <itunes:keywords>machine learning, agriculture, farming</itunes:keywords>
      <itunes:explicit>No</itunes:explicit>
      <podcast:transcript url="https://share.transistor.fm/s/efd334e9/transcript.txt" type="text/plain"/>
    </item>
    <item>
      <title>Unlocking Metabolic Health with Bill Tancer from Signos</title>
      <itunes:episode>64</itunes:episode>
      <podcast:episode>64</podcast:episode>
      <itunes:title>Unlocking Metabolic Health with Bill Tancer from Signos</itunes:title>
      <itunes:episodeType>full</itunes:episodeType>
      <guid isPermaLink="false">7d8e405c-07bc-42e7-8feb-0e2ba90ebbce</guid>
      <link>https://pixelscientia.com/podcast/unlocking-metabolic-health-with-bill-tancer-from-signos/</link>
      <description>
        <![CDATA[<p>Continuous glucose monitors (CGMs) are a trusted tool for diabetics, but today’s guest believes that widespread adoption could also be valuable for reversing the obesity crisis. Meet Bill Tancer, the Co-founder and Chief Data Scientist of Signos, a metabolic health platform that combines CGMs with a unique AI engine to offer real-time data and recommendations for healthy weight management.</p><p>Today, Bill joins me to talk about all things metabolic health and machine learning. Tune in as we discuss how the Signos team trains their machine learning algorithms, the challenges they encounter when it comes to gathering data, and some of the other external factors that influence the performance of their model. We also touch on the value of qualitative data in the form of user feedback, the importance of keeping your mission in mind in the rapidly expanding AI space, and so much more! To find out how Signos is unlocking metabolic health with ML, don’t miss this episode of Impact AI.</p><p><br></p><p><strong>Key Points:</strong></p><ul><li>Reflecting on the personal and professional paths that led Bill to create Signos.</li><li>What Signos does for glycemic dysregulation and why it’s so important for healthcare.</li><li>Insight into the role that ML plays in Signos’ technology.</li><li>How Signos trains their ML algorithms using various sources of data.</li><li>Food logging and other challenges that come with gathering CGM data.</li><li>Ways that external factors influence model performance and how Signos mitigates that.</li><li>Qualitative user responses that help Bill measure the impact of this technology.</li><li>Bill’s mission-driven advice for other leaders of AI-powered startups.</li><li>How he believes the impact of Signos will continue to evolve going forward.</li></ul><p><br></p><p><strong>Quotes:</strong></p><p>“Along with diabetes as its own health risk, having [dysregulated] glucose can lead to other medical problems. 
Cardiovascular disease, stroke, Alzheimer's, just to name a few. [It] is such an important goal for [Signos] to help people reduce their glycemic variability.” — Bill Tancer</p><p><br></p><p>“That's what gets me up in the morning; hearing [positive user anecdotes]. That, in conjunction with looking at our own data and how our members are improving in terms of their wellness, tells us we're having a measurable impact.” — Bill Tancer</p><p><br></p><p>“It is so easy [with] all the things you can do with AI to end up in a space where you've got a solution that's searching for a problem to solve. The antidote to finding yourself in that situation is always returning back to your mission.” — Bill Tancer</p><p><br></p><p><strong>Links:</strong></p><p><a href="https://www.signos.com/">Signos</a></p><p><a href="https://www.signos.com/blog-category/podcast">Body Signals Podcast</a></p><p><a href="https://www.linkedin.com/in/bill-tancer-986683/">Bill Tancer on LinkedIn</a></p><p><a href="https://www.instagram.com/billtancer/">Bill Tancer on Instagram</a></p><p><br><strong>Resources for Computer Vision Teams:</strong></p><p><a href="https://www.linkedin.com/in/hdcouture/">LinkedIn</a> – Connect with Heather.</p><p><a href="https://pixelscientia.com/newsletter/">Computer Vision Insights Newsletter</a> – A biweekly newsletter to help bring the latest machine learning and computer vision research to applications in people and planetary health.</p><p><a href="http://pixelscientia.com/services/strategy-session/">Computer Vision Strategy Session</a> – Not sure how to advance your computer vision project? Get unstuck with a clear set of next steps. Schedule a 1 hour strategy session now to advance your project.</p><p><a href="https://pixelscientia.com/services/custom-vision-model-assessment/">Custom Vision Model Assessment</a> – Foundation models are popping up everywhere – do you need one for your proprietary image dataset? 
Get a clear perspective on whether you can benefit from a domain-specific foundation model.</p>]]>
      </description>
      <content:encoded>
        <![CDATA[<p>Continuous glucose monitors (CGMs) are a trusted tool for diabetics, but today’s guest believes that widespread adoption could also be valuable for reversing the obesity crisis. Meet Bill Tancer, the Co-founder and Chief Data Scientist of Signos, a metabolic health platform that combines CGMs with a unique AI engine to offer real-time data and recommendations for healthy weight management.</p><p>Today, Bill joins me to talk about all things metabolic health and machine learning. Tune in as we discuss how the Signos team trains their machine learning algorithms, the challenges they encounter when it comes to gathering data, and some of the other external factors that influence the performance of their model. We also touch on the value of qualitative data in the form of user feedback, the importance of keeping your mission in mind in the rapidly expanding AI space, and so much more! To find out how Signos is unlocking metabolic health with ML, don’t miss this episode of Impact AI.</p><p><br></p><p><strong>Key Points:</strong></p><ul><li>Reflecting on the personal and professional paths that led Bill to create Signos.</li><li>What Signos does for glycemic dysregulation and why it’s so important for healthcare.</li><li>Insight into the role that ML plays in Signos’ technology.</li><li>How Signos trains their ML algorithms using various sources of data.</li><li>Food logging and other challenges that come with gathering CGM data.</li><li>Ways that external factors influence model performance and how Signos mitigates that.</li><li>Qualitative user responses that help Bill measure the impact of this technology.</li><li>Bill’s mission-driven advice for other leaders of AI-powered startups.</li><li>How he believes the impact of Signos will continue to evolve going forward.</li></ul><p><br></p><p><strong>Quotes:</strong></p><p>“Along with diabetes as its own health risk, having [dysregulated] glucose can lead to other medical problems. 
Cardiovascular disease, stroke, Alzheimer's, just to name a few. [It] is such an important goal for [Signos] to help people reduce their glycemic variability.” — Bill Tancer</p><p><br></p><p>“That's what gets me up in the morning; hearing [positive user anecdotes]. That, in conjunction with looking at our own data and how our members are improving in terms of their wellness, tells us we're having a measurable impact.” — Bill Tancer</p><p><br></p><p>“It is so easy [with] all the things you can do with AI to end up in a space where you've got a solution that's searching for a problem to solve. The antidote to finding yourself in that situation is always returning back to your mission.” — Bill Tancer</p><p><br></p><p><strong>Links:</strong></p><p><a href="https://www.signos.com/">Signos</a></p><p><a href="https://www.signos.com/blog-category/podcast">Body Signals Podcast</a></p><p><a href="https://www.linkedin.com/in/bill-tancer-986683/">Bill Tancer on LinkedIn</a></p><p><a href="https://www.instagram.com/billtancer/">Bill Tancer on Instagram</a></p><p><br><strong>Resources for Computer Vision Teams:</strong></p><p><a href="https://www.linkedin.com/in/hdcouture/">LinkedIn</a> – Connect with Heather.</p><p><a href="https://pixelscientia.com/newsletter/">Computer Vision Insights Newsletter</a> – A biweekly newsletter to help bring the latest machine learning and computer vision research to applications in people and planetary health.</p><p><a href="http://pixelscientia.com/services/strategy-session/">Computer Vision Strategy Session</a> – Not sure how to advance your computer vision project? Get unstuck with a clear set of next steps. Schedule a 1 hour strategy session now to advance your project.</p><p><a href="https://pixelscientia.com/services/custom-vision-model-assessment/">Custom Vision Model Assessment</a> – Foundation models are popping up everywhere – do you need one for your proprietary image dataset? 
Get a clear perspective on whether you can benefit from a domain-specific foundation model.</p>]]>
      </content:encoded>
      <pubDate>Mon, 08 Jan 2024 06:00:00 -0500</pubDate>
      <author>Heather D. Couture</author>
      <enclosure url="https://2.gum.fm/op3.dev/e/pdcn.co/e/pdst.fm/e/dts.podtrac.com/redirect.mp3/media.transistor.fm/fb75d582/1e04dfe6.mp3" length="28538191" type="audio/mpeg"/>
      <itunes:author>Heather D. Couture</itunes:author>
      <itunes:image href="https://img.transistor.fm/c33OtCFKNAzk-aVfmksDNuNLSdRBoXeKXWCUrs6SXpU/rs:fill:0:0:1/w:1400/h:1400/q:60/mb:500000/aHR0cHM6Ly9pbWct/dXBsb2FkLXByb2R1/Y3Rpb24udHJhbnNp/c3Rvci5mbS9lcGlz/b2RlLzE2NTM0NzAv/MTcwMzEwNzA4Ny1h/cnR3b3JrLmpwZw.jpg"/>
      <itunes:duration>1185</itunes:duration>
      <itunes:summary>
        <![CDATA[<p>Continuous glucose monitors (CGMs) are a trusted tool for diabetics, but today’s guest believes that widespread adoption could also be valuable for reversing the obesity crisis. Meet Bill Tancer, the Co-founder and Chief Data Scientist of Signos, a metabolic health platform that combines CGMs with a unique AI engine to offer real-time data and recommendations for healthy weight management.</p><p>Today, Bill joins me to talk about all things metabolic health and machine learning. Tune in as we discuss how the Signos team trains their machine learning algorithms, the challenges they encounter when it comes to gathering data, and some of the other external factors that influence the performance of their model. We also touch on the value of qualitative data in the form of user feedback, the importance of keeping your mission in mind in the rapidly expanding AI space, and so much more! To find out how Signos is unlocking metabolic health with ML, don’t miss this episode of Impact AI.</p><p><br></p><p><strong>Key Points:</strong></p><ul><li>Reflecting on the personal and professional paths that led Bill to create Signos.</li><li>What Signos does for glycemic dysregulation and why it’s so important for healthcare.</li><li>Insight into the role that ML plays in Signos’ technology.</li><li>How Signos trains their ML algorithms using various sources of data.</li><li>Food logging and other challenges that come with gathering CGM data.</li><li>Ways that external factors influence model performance and how Signos mitigates that.</li><li>Qualitative user responses that help Bill measure the impact of this technology.</li><li>Bill’s mission-driven advice for other leaders of AI-powered startups.</li><li>How he believes the impact of Signos will continue to evolve going forward.</li></ul><p><br></p><p><strong>Quotes:</strong></p><p>“Along with diabetes as its own health risk, having [dysregulated] glucose can lead to other medical problems. 
Cardiovascular disease, stroke, Alzheimer's, just to name a few. [It] is such an important goal for [Signos] to help people reduce their glycemic variability.” — Bill Tancer</p><p><br></p><p>“That's what gets me up in the morning; hearing [positive user anecdotes]. That, in conjunction with looking at our own data and how our members are improving in terms of their wellness, tells us we're having a measurable impact.” — Bill Tancer</p><p><br></p><p>“It is so easy [with] all the things you can do with AI to end up in a space where you've got a solution that's searching for a problem to solve. The antidote to finding yourself in that situation is always returning back to your mission.” — Bill Tancer</p><p><br></p><p><strong>Links:</strong></p><p><a href="https://www.signos.com/">Signos</a></p><p><a href="https://www.signos.com/blog-category/podcast">Body Signals Podcast</a></p><p><a href="https://www.linkedin.com/in/bill-tancer-986683/">Bill Tancer on LinkedIn</a></p><p><a href="https://www.instagram.com/billtancer/">Bill Tancer on Instagram</a></p><p><br><strong>Resources for Computer Vision Teams:</strong></p><p><a href="https://www.linkedin.com/in/hdcouture/">LinkedIn</a> – Connect with Heather.</p><p><a href="https://pixelscientia.com/newsletter/">Computer Vision Insights Newsletter</a> – A biweekly newsletter to help bring the latest machine learning and computer vision research to applications in people and planetary health.</p><p><a href="http://pixelscientia.com/services/strategy-session/">Computer Vision Strategy Session</a> – Not sure how to advance your computer vision project? Get unstuck with a clear set of next steps. Schedule a 1 hour strategy session now to advance your project.</p><p><a href="https://pixelscientia.com/services/custom-vision-model-assessment/">Custom Vision Model Assessment</a> – Foundation models are popping up everywhere – do you need one for your proprietary image dataset? 
Get a clear perspective on whether you can benefit from a domain-specific foundation model.</p>]]>
      </itunes:summary>
      <itunes:keywords>machine learning, metabolism, metabolic health, continuous glucose monitor</itunes:keywords>
      <itunes:explicit>No</itunes:explicit>
      <podcast:transcript url="https://share.transistor.fm/s/fb75d582/transcript.txt" type="text/plain"/>
    </item>
    <item>
      <title>Diagnosing Infection with Ljubomir Buturovic from Inflammatix</title>
      <itunes:episode>63</itunes:episode>
      <podcast:episode>63</podcast:episode>
      <itunes:title>Diagnosing Infection with Ljubomir Buturovic from Inflammatix</itunes:title>
      <itunes:episodeType>full</itunes:episodeType>
      <guid isPermaLink="false">45158b1c-1694-4660-b55c-66c1102176b0</guid>
      <link>https://pixelscientia.com/podcast/diagnosing-infection-with-ljubomir-buturovic-from-inflammatix/</link>
      <description>
        <![CDATA[<p>In an emergency setting, making a quick diagnosis under pressure is often a matter of life or death. This is especially true when it comes to diagnosing infectious diseases. Unfortunately, diagnosing infections in an emergency department is rife with challenges. Current tests either take too long, deliver unreliable results, or both. That’s where Inflammatix comes in. They are using machine learning technology to develop a point-of-care instrument that will diagnose the type of infection, and severity of infection, in emergency care quickly and effectively. Their first main product is currently in the late stages of development and can deliver a test report in about half an hour using whole blood as a sample source.</p><p>Joining me today to shed light on this incredible initiative is Ljubomir Buturovic, Vice President of Machine Learning at Inflammatix. We hear from Ljubomir about the role that machine learning played in this technology, key challenges they’ve encountered while training models on gene expression data, how they selected the 29 clinically relevant genes based on published scientific papers, plus a whole lot more. 
Tune in today to learn more about the groundbreaking work being done at Inflammatix and what you can expect from them in future!</p><p><br></p><p><strong>Key Points:</strong></p><ul><li>A warm welcome to today’s guest Ljubomir Buturovic.</li><li>Ljubomir’s background in machine learning and what led him to Inflammatix.</li><li>An overview of the important work being done at Inflammatix in healthcare.</li><li>Details about their main product for diagnosis in emergency care.</li><li>The role of machine learning in their technology to measure gene expression.</li><li>How they selected the 29 clinically relevant genes based on published scientific papers.</li><li>Key challenges they encountered while training models on gene expression data.</li><li>Ground truth labels; the strategies they used to identify infections and validate their models.</li><li>How they made sure that their models would work for multiple assay platforms.</li><li>Using grouped analysis to ensure their models would serve a diverse patient population.</li><li>Their approach to developing technology that would fit in with the clinical workflow and provide the right assistance to doctors and patients.</li><li>The benefits that Inflammatix has seen from publishing their work.</li><li>Ljubomir’s advice to other leaders of AI-powered startups working in healthcare.</li><li>Where you can expect to see Inflammatix in five years.</li></ul><p><br></p><p><strong>Quotes:</strong></p><p>“We developed an instrument which measures this gene expression for 29 clinically relevant genes for infections.” — Ljubomir Buturovic</p><p><br></p><p>“It takes a long time to achieve adoption. This is basically applying AI in medicine. 
When you are applying AI in medicine, the whole process of development and adoption works on medicine timescales, not on AI timescales.” — Ljubomir Buturovic</p><p><br></p><p>“One of the key challenges in applying machine learning in clinical test design is the availability of samples for training and validation. This is in sharp contrast to other applications, like maybe movie recommendations, or shopping, where you have a lot of input data, because it's relatively easy to collect.” — Ljubomir Buturovic</p><p><br></p><p><strong>Links:</strong></p><p><a href="https://inflammatix.com/">Inflammatix</a></p><p><a href="https://inflammatix.com/ml/#ml1-blogs">Inflammatix's Machine Learning Blog</a></p><p><a href="https://www.linkedin.com/in/ljubomir-buturovic-798156/">Ljubomir Buturovic on LinkedIn</a></p><p><a href="https://twitter.com/ljbuturovic">Ljubomir Buturovic on X</a></p><p><br></p><p><strong>Resources for Computer Vision Teams:</strong></p><p><a href="https://www.linkedin.com/in/hdcouture/">LinkedIn</a> – Connect with Heather.</p><p><a href="https://pixelscientia.com/newsletter/">Computer Vision Insights Newsletter</a> – A biweekly newsletter to help bring the latest machine learning and computer vision research to applications in people and planetary health.</p><p><a href="http://pixelscientia.com/services/strategy-session/">Computer Vision Strategy Session</a> – Not sure how to advance your computer vision project? Get unstuck with a clear set of next steps. Schedule a 1 hour strategy session now to advance your project.</p><p><a href="https://pixelscientia.com/services/advisory/">Computer Vision Advisory Services</a> – Monthly advisory services to help you strategically plan your CV/ML capabilities, reduce the trial-and-error of model development, and get to market faster.</p>]]>
      </description>
      <content:encoded>
        <![CDATA[<p>In an emergency setting, making a quick diagnosis under pressure is often a matter of life or death. This is especially true when it comes to diagnosing infectious diseases. Unfortunately, diagnosing infections in an emergency department is rife with challenges. Current tests either take too long, deliver unreliable results, or both. That’s where Inflammatix comes in. They are using machine learning technology to develop a point-of-care instrument that will diagnose the type of infection, and severity of infection, in emergency care quickly and effectively. Their first main product is currently in the late stages of development and can deliver a test report in about half an hour using whole blood as a sample source.</p><p>Joining me today to shed light on this incredible initiative is Ljubomir Buturovic, Vice President of Machine Learning at Inflammatix. We hear from Ljubomir about the role that machine learning played in this technology, key challenges they’ve encountered while training models on gene expression data, how they selected the 29 clinically relevant genes based on published scientific papers, plus a whole lot more. 
Tune in today to learn more about the groundbreaking work being done at Inflammatix and what you can expect from them in future!</p><p><br></p><p><strong>Key Points:</strong></p><ul><li>A warm welcome to today’s guest Ljubomir Buturovic.</li><li>Ljubomir’s background in machine learning and what led him to Inflammatix.</li><li>An overview of the important work being done at Inflammatix in healthcare.</li><li>Details about their main product for diagnosis in emergency care.</li><li>The role of machine learning in their technology to measure gene expression.</li><li>How they selected the 29 clinically relevant genes based on published scientific papers.</li><li>Key challenges they encountered while training models on gene expression data.</li><li>Ground truth labels; the strategies they used to identify infections and validate their models.</li><li>How they made sure that their models would work for multiple assay platforms.</li><li>Using grouped analysis to ensure their models would serve a diverse patient population.</li><li>Their approach to developing technology that would fit in with the clinical workflow and provide the right assistance to doctors and patients.</li><li>The benefits that Inflammatix has seen from publishing their work.</li><li>Ljubomir’s advice to other leaders of AI-powered startups working in healthcare.</li><li>Where you can expect to see Inflammatix in five years.</li></ul><p><br></p><p><strong>Quotes:</strong></p><p>“We developed an instrument which measures this gene expression for 29 clinically relevant genes for infections.” — Ljubomir Buturovic</p><p><br></p><p>“It takes a long time to achieve adoption. This is basically applying AI in medicine. 
When you are applying AI in medicine, the whole process of development and adoption works on medicine timescales, not on AI timescales.” — Ljubomir Buturovic</p><p><br></p><p>“One of the key challenges in applying machine learning in clinical test design is the availability of samples for training and validation. This is in sharp contrast to other applications, like maybe movie recommendations, or shopping, where you have a lot of input data, because it's relatively easy to collect.” — Ljubomir Buturovic</p><p><br></p><p><strong>Links:</strong></p><p><a href="https://inflammatix.com/">Inflammatix</a></p><p><a href="https://inflammatix.com/ml/#ml1-blogs">Inflammatix's Machine Learning Blog</a></p><p><a href="https://www.linkedin.com/in/ljubomir-buturovic-798156/">Ljubomir Buturovic on LinkedIn</a></p><p><a href="https://twitter.com/ljbuturovic">Ljubomir Buturovic on X</a></p><p><br></p><p><strong>Resources for Computer Vision Teams:</strong></p><p><a href="https://www.linkedin.com/in/hdcouture/">LinkedIn</a> – Connect with Heather.</p><p><a href="https://pixelscientia.com/newsletter/">Computer Vision Insights Newsletter</a> – A biweekly newsletter to help bring the latest machine learning and computer vision research to applications in people and planetary health.</p><p><a href="http://pixelscientia.com/services/strategy-session/">Computer Vision Strategy Session</a> – Not sure how to advance your computer vision project? Get unstuck with a clear set of next steps. Schedule a 1 hour strategy session now to advance your project.</p><p><a href="https://pixelscientia.com/services/advisory/">Computer Vision Advisory Services</a> – Monthly advisory services to help you strategically plan your CV/ML capabilities, reduce the trial-and-error of model development, and get to market faster.</p>]]>
      </content:encoded>
      <pubDate>Mon, 18 Dec 2023 06:00:00 -0500</pubDate>
      <author>Heather D. Couture</author>
      <enclosure url="https://2.gum.fm/op3.dev/e/pdcn.co/e/pdst.fm/e/dts.podtrac.com/redirect.mp3/media.transistor.fm/a3ca667e/68e2355c.mp3" length="35816085" type="audio/mpeg"/>
      <itunes:author>Heather D. Couture</itunes:author>
      <itunes:image href="https://img.transistor.fm/HJVU5yjyv41ya_NqTxjsErfbiUUbCemvU52OGuezcSs/rs:fill:0:0:1/w:1400/h:1400/q:60/mb:500000/aHR0cHM6Ly9pbWct/dXBsb2FkLXByb2R1/Y3Rpb24udHJhbnNp/c3Rvci5mbS9lcGlz/b2RlLzE2MDAxMjEv/MTcwMDAwODQyOS1h/cnR3b3JrLmpwZw.jpg"/>
      <itunes:duration>1487</itunes:duration>
      <itunes:summary>
        <![CDATA[<p>In an emergency setting, making a quick diagnosis under pressure is often a matter of life or death. This is especially true when it comes to diagnosing infectious diseases. Unfortunately, diagnosing infections in an emergency department is rife with challenges. Current tests either take too long, deliver unreliable results, or both. That’s where Inflammatix comes in. They are using machine learning technology to develop a point-of-care instrument that will diagnose the type of infection, and severity of infection, in emergency care quickly and effectively. Their first main product is currently in the late stages of development and can deliver a test report in about half an hour using whole blood as a sample source.</p><p>Joining me today to shed light on this incredible initiative is Ljubomir Buturovic, Vice President of Machine Learning at Inflammatix. We hear from Ljubomir about the role that machine learning played in this technology, key challenges they’ve encountered while training models on gene expression data, how they selected the 29 clinically relevant genes based on published scientific papers, plus a whole lot more. 
Tune in today to learn more about the groundbreaking work being done at Inflammatix and what you can expect from them in future!</p><p><br></p><p><strong>Key Points:</strong></p><ul><li>A warm welcome to today’s guest Ljubomir Buturovic.</li><li>Ljubomir’s background in machine learning and what led him to Inflammatix.</li><li>An overview of the important work being done at Inflammatix in healthcare.</li><li>Details about their main product for diagnosis in emergency care.</li><li>The role of machine learning in their technology to measure gene expression.</li><li>How they selected the 29 clinically relevant genes based on published scientific papers.</li><li>Key challenges they encountered while training models on gene expression data.</li><li>Ground truth labels; the strategies they used to identify infections and validate their models.</li><li>How they made sure that their models would work for multiple assay platforms.</li><li>Using grouped analysis to ensure their models would serve a diverse patient population.</li><li>Their approach to developing technology that would fit in with the clinical workflow and provide the right assistance to doctors and patients.</li><li>The benefits that Inflammatix has seen from publishing their work.</li><li>Ljubomir’s advice to other leaders of AI-powered startups working in healthcare.</li><li>Where you can expect to see Inflammatix in five years.</li></ul><p><br></p><p><strong>Quotes:</strong></p><p>“We developed an instrument which measures this gene expression for 29 clinically relevant genes for infections.” — Ljubomir Buturovic</p><p><br></p><p>“It takes a long time to achieve adoption. This is basically applying AI in medicine. 
When you are applying AI in medicine, the whole process of development and adoption works on medicine timescales, not on AI timescales.” — Ljubomir Buturovic</p><p><br></p><p>“One of the key challenges in applying machine learning in clinical test design is the availability of samples for training and validation. This is in sharp contrast to other applications, like maybe movie recommendations, or shopping, where you have a lot of input data, because it's relatively easy to collect.” — Ljubomir Buturovic</p><p><br></p><p><strong>Links:</strong></p><p><a href="https://inflammatix.com/">Inflammatix</a></p><p><a href="https://inflammatix.com/ml/#ml1-blogs">Inflammatix's Machine Learning Blog</a></p><p><a href="https://www.linkedin.com/in/ljubomir-buturovic-798156/">Ljubomir Buturovic on LinkedIn</a></p><p><a href="https://twitter.com/ljbuturovic">Ljubomir Buturovic on X</a></p><p><br></p><p><strong>Resources for Computer Vision Teams:</strong></p><p><a href="https://www.linkedin.com/in/hdcouture/">LinkedIn</a> – Connect with Heather.</p><p><a href="https://pixelscientia.com/newsletter/">Computer Vision Insights Newsletter</a> – A biweekly newsletter to help bring the latest machine learning and computer vision research to applications in people and planetary health.</p><p><a href="http://pixelscientia.com/services/strategy-session/">Computer Vision Strategy Session</a> – Not sure how to advance your computer vision project? Get unstuck with a clear set of next steps. Schedule a 1 hour strategy session now to advance your project.</p><p><a href="https://pixelscientia.com/services/advisory/">Computer Vision Advisory Services</a> – Monthly advisory services to help you strategically plan your CV/ML capabilities, reduce the trial-and-error of model development, and get to market faster.</p>]]>
      </itunes:summary>
      <itunes:keywords>machine learning, infection, sepsis, emergency department, diagnostics</itunes:keywords>
      <itunes:explicit>No</itunes:explicit>
      <podcast:transcript url="https://share.transistor.fm/s/a3ca667e/transcript.txt" type="text/plain"/>
    </item>
    <item>
      <title>Foundation Models for Earth Observation with Hamed Alemohamad from Clark University</title>
      <itunes:episode>62</itunes:episode>
      <podcast:episode>62</podcast:episode>
      <itunes:title>Foundation Models for Earth Observation with Hamed Alemohamad from Clark University</itunes:title>
      <itunes:episodeType>full</itunes:episodeType>
      <guid isPermaLink="false">8fdd3ba1-19ee-4d4e-8267-e9ceadf01770</guid>
      <link>https://pixelscientia.com/podcast/foundation-models-for-earth-observation-with-hamed-alemohamad-from-clark-university/</link>
      <description>
        <![CDATA[<p>There are now a few different AI foundation models available for Earth Observation (EO) data. These vast neural networks can be rapidly fine-tuned for many downstream tasks, making them a highly versatile and appealing tool.</p><p>Today on Impact AI, I am joined by Hamed Alemohammad, Associate Professor in the Department of Geography at Clark University, Director of the Clark Center for Geospatial Analytics, and former Chief Data Scientist of the Radiant Earth Foundation, to discuss the applications of foundation models for remote sensing. Hamed’s research interests lie at the intersection of geographic information science and geography, using observations and analytical methods like machine learning to better understand the changing systems of our planet.</p><p>In this episode, he shares his perspective on the myriad purposes that foundation models serve and offers insight into training and fine-tuning them for different downstream applications. We also discuss how to choose the right one for a given project, ethical considerations for using them responsibly, and more. 
For a glimpse at the future of foundation models for remote sensing, tune in today!</p><p><br></p><p><strong>Key Points:</strong></p><ul><li>A look at Hamed’s professional journey and the research topics he focuses on today.</li><li>Defining foundation models and the purposes they serve.</li><li>The vast amount of data and resources required to train and fine-tune a foundation model.</li><li>Ways to determine whether or not a foundation model will be beneficial.</li><li>How foundation models improve generalizability for downstream tasks.</li><li>Factors to consider when selecting a foundation model for a given downstream task.</li><li>Insight into the future of foundation models for remote sensing.</li><li>Hamed’s advice for machine learning teams looking to give foundation models a try.</li><li>His take on the impact of foundation models in the next three to five years.</li><li>Ethical considerations for the responsible use of AI that apply to foundation models too.</li></ul><p><br></p><p><strong>Quotes:</strong></p><p>“[Foundation models] are pre-trained on a large amount of unlabeled data. Secondly, they use self-supervised learning techniques – The third property is that you can fine-tune this model with a very small set of labeled data for multiple downstream tasks.” — Hamed Alemohammad</p><p><br></p><p>“It takes a lot to train a model, but you would not [do it] as frequently as you would [fine-tune] the model. 
You can use shared resources from different teams to do that - share it as an open-source model, and then anybody can fine-tune it for their downstream application.” — Hamed Alemohammad</p><p><br></p><p>“The promising future [for foundation models] will be combining different modes of data as input.” — Hamed Alemohammad</p><p><br></p><p>“There is a lot to do and the community is eager to learn, so if people are looking for challenging problems, I would encourage them to explore [the foundation model domain] and work with domain experts.” — Hamed Alemohammad</p><p><br></p><p><strong>Links:</strong></p><p><a href="https://hamedalemo.github.io/">Hamed Alemohammad</a></p><p><a href="https://www.clarku.edu/faculty/profiles/hamed-alemohammad/">Hamed Alemohammad, Clark University</a> </p><p><a href="https://www.linkedin.com/in/hamedalemo/">Hamed Alemohammad on LinkedIn</a></p><p><a href="https://twitter.com/HamedAlemo">Hamed Alemohammad on X</a></p><p><a href="https://github.com/hamedalemo">Hamed Alemohammad on GitHub</a></p><p><a href="https://arxiv.org/abs/2310.18660">Foundation Models for Generalist Geospatial Artificial Intelligence</a></p><p><a href="https://huggingface.co/ibm-nasa-geospatial/Prithvi-100M">Prithvi-100M on Hugging Face</a></p><p><a href="https://huggingface.co/ibm-nasa-geospatial/Prithvi-100M-multi-temporal-crop-classification">HLS Multi-Temporal Crop Classification Model on Hugging Face</a></p><p><strong>Resources for Computer Vision Teams:</strong></p><p><a href="https://www.linkedin.com/in/hdcouture/">LinkedIn</a> – Connect with Heather.</p><p><a href="https://pixelscientia.com/newsletter/">Computer Vision Insights Newsletter</a> – A biweekly newsletter to help bring the latest machine learning and computer vision research to applications in people and planetary health.</p><p><a href="http://pixelscientia.com/services/strategy-session/">Computer Vision Strategy Session</a> – Not sure how to advance your computer vision project? 
Get unstuck with a clear set of next steps. Schedule a 1 hour strategy session now to advance your project.</p><p><a href="https://pixelscientia.com/services/custom-vision-model-assessment/">Custom Vision Model Assessment</a> – Foundation models are popping up everywhere – do you need one for your proprietary image dataset? Get a clear perspective on whether you can benefit from a domain-specific foundation model.</p>]]>
      </description>
      <content:encoded>
        <![CDATA[<p>There are now a few different AI foundation models available for Earth Observation (EO) data. These vast neural networks can be rapidly fine-tuned for many downstream tasks, making them a highly versatile and appealing tool.</p><p>Today on Impact AI, I am joined by Hamed Alemohammad, Associate Professor in the Department of Geography at Clark University, Director of the Clark Center for Geospatial Analytics, and former Chief Data Scientist of the Radiant Earth Foundation, to discuss the applications of foundation models for remote sensing. Hamed’s research interests lie at the intersection of geographic information science and geography, using observations and analytical methods like machine learning to better understand the changing systems of our planet.</p><p>In this episode, he shares his perspective on the myriad purposes that foundation models serve and offers insight into training and fine-tuning them for different downstream applications. We also discuss how to choose the right one for a given project, ethical considerations for using them responsibly, and more. 
For a glimpse at the future of foundation models for remote sensing, tune in today!</p><p><br></p><p><strong>Key Points:</strong></p><ul><li>A look at Hamed’s professional journey and the research topics he focuses on today.</li><li>Defining foundation models and the purposes they serve.</li><li>The vast amount of data and resources required to train and fine-tune a foundation model.</li><li>Ways to determine whether or not a foundation model will be beneficial.</li><li>How foundation models improve generalizability for downstream tasks.</li><li>Factors to consider when selecting a foundation model for a given downstream task.</li><li>Insight into the future of foundation models for remote sensing.</li><li>Hamed’s advice for machine learning teams looking to give foundation models a try.</li><li>His take on the impact of foundation models in the next three to five years.</li><li>Ethical considerations for the responsible use of AI that apply to foundation models too.</li></ul><p><br></p><p><strong>Quotes:</strong></p><p>“[Foundation models] are pre-trained on a large amount of unlabeled data. Secondly, they use self-supervised learning techniques – The third property is that you can fine-tune this model with a very small set of labeled data for multiple downstream tasks.” — Hamed Alemohammad</p><p><br></p><p>“It takes a lot to train a model, but you would not [do it] as frequently as you would [fine-tune] the model. 
You can use shared resources from different teams to do that - share it as an open-source model, and then anybody can fine-tune it for their downstream application.” — Hamed Alemohammad</p><p><br></p><p>“The promising future [for foundation models] will be combining different modes of data as input.” — Hamed Alemohammad</p><p><br></p><p>“There is a lot to do and the community is eager to learn, so if people are looking for challenging problems, I would encourage them to explore [the foundation model domain] and work with domain experts.” — Hamed Alemohammad</p><p><br></p><p><strong>Links:</strong></p><p><a href="https://hamedalemo.github.io/">Hamed Alemohammad</a></p><p><a href="https://www.clarku.edu/faculty/profiles/hamed-alemohammad/">Hamed Alemohammad, Clark University</a> </p><p><a href="https://www.linkedin.com/in/hamedalemo/">Hamed Alemohammad on LinkedIn</a></p><p><a href="https://twitter.com/HamedAlemo">Hamed Alemohammad on X</a></p><p><a href="https://github.com/hamedalemo">Hamed Alemohammad on GitHub</a></p><p><a href="https://arxiv.org/abs/2310.18660">Foundation Models for Generalist Geospatial Artificial Intelligence</a></p><p><a href="https://huggingface.co/ibm-nasa-geospatial/Prithvi-100M">Prithvi-100M on Hugging Face</a></p><p><a href="https://huggingface.co/ibm-nasa-geospatial/Prithvi-100M-multi-temporal-crop-classification">HLS Multi-Temporal Crop Classification Model on Hugging Face</a></p><p><strong>Resources for Computer Vision Teams:</strong></p><p><a href="https://www.linkedin.com/in/hdcouture/">LinkedIn</a> – Connect with Heather.</p><p><a href="https://pixelscientia.com/newsletter/">Computer Vision Insights Newsletter</a> – A biweekly newsletter to help bring the latest machine learning and computer vision research to applications in people and planetary health.</p><p><a href="http://pixelscientia.com/services/strategy-session/">Computer Vision Strategy Session</a> – Not sure how to advance your computer vision project? 
Get unstuck with a clear set of next steps. Schedule a 1 hour strategy session now to advance your project.</p><p><a href="https://pixelscientia.com/services/custom-vision-model-assessment/">Custom Vision Model Assessment</a> – Foundation models are popping up everywhere – do you need one for your proprietary image dataset? Get a clear perspective on whether you can benefit from a domain-specific foundation model.</p>]]>
      </content:encoded>
      <pubDate>Mon, 11 Dec 2023 06:00:00 -0500</pubDate>
      <author>Heather D. Couture</author>
      <enclosure url="https://2.gum.fm/op3.dev/e/pdcn.co/e/pdst.fm/e/dts.podtrac.com/redirect.mp3/media.transistor.fm/463d3cd0/0892f362.mp3" length="28698311" type="audio/mpeg"/>
      <itunes:author>Heather D. Couture</itunes:author>
      <itunes:image href="https://img.transistor.fm/ymOvtyL0DhusU8-8ZF444-9Li2SJMOANlh_9gI9qEwQ/rs:fill:0:0:1/w:1400/h:1400/q:60/mb:500000/aHR0cHM6Ly9pbWct/dXBsb2FkLXByb2R1/Y3Rpb24udHJhbnNp/c3Rvci5mbS9lcGlz/b2RlLzE2MzE5NzQv/MTcwMTgxNzk1NC1h/cnR3b3JrLmpwZw.jpg"/>
      <itunes:duration>1782</itunes:duration>
      <itunes:summary>
        <![CDATA[<p>There are now a few different AI foundation models available for Earth Observation (EO) data. These vast neural networks can be rapidly fine-tuned for many downstream tasks, making them a highly versatile and appealing tool.</p><p>Today on Impact AI, I am joined by Hamed Alemohammad, Associate Professor in the Department of Geography at Clark University, Director of the Clark Center for Geospatial Analytics, and former Chief Data Scientist of the Radiant Earth Foundation, to discuss the applications of foundation models for remote sensing. Hamed’s research interests lie at the intersection of geographic information science and geography, using observations and analytical methods like machine learning to better understand the changing systems of our planet.</p><p>In this episode, he shares his perspective on the myriad purposes that foundation models serve and offers insight into training and fine-tuning them for different downstream applications. We also discuss how to choose the right one for a given project, ethical considerations for using them responsibly, and more. 
For a glimpse at the future of foundation models for remote sensing, tune in today!</p><p><br></p><p><strong>Key Points:</strong></p><ul><li>A look at Hamed’s professional journey and the research topics he focuses on today.</li><li>Defining foundation models and the purposes they serve.</li><li>The vast amount of data and resources required to train and fine-tune a foundation model.</li><li>Ways to determine whether or not a foundation model will be beneficial.</li><li>How foundation models improve generalizability for downstream tasks.</li><li>Factors to consider when selecting a foundation model for a given downstream task.</li><li>Insight into the future of foundation models for remote sensing.</li><li>Hamed’s advice for machine learning teams looking to give foundation models a try.</li><li>His take on the impact of foundation models in the next three to five years.</li><li>Ethical considerations for the responsible use of AI that apply to foundation models too.</li></ul><p><br></p><p><strong>Quotes:</strong></p><p>“[Foundation models] are pre-trained on a large amount of unlabeled data. Secondly, they use self-supervised learning techniques – The third property is that you can fine-tune this model with a very small set of labeled data for multiple downstream tasks.” — Hamed Alemohammad</p><p><br></p><p>“It takes a lot to train a model, but you would not [do it] as frequently as you would [fine-tune] the model. 
You can use shared resources from different teams to do that - share it as an open-source model, and then anybody can fine-tune it for their downstream application.” — Hamed Alemohammad</p><p><br></p><p>“The promising future [for foundation models] will be combining different modes of data as input.” — Hamed Alemohammad</p><p><br></p><p>“There is a lot to do and the community is eager to learn, so if people are looking for challenging problems, I would encourage them to explore [the foundation model domain] and work with domain experts.” — Hamed Alemohammad</p><p><br></p><p><strong>Links:</strong></p><p><a href="https://hamedalemo.github.io/">Hamed Alemohammad</a></p><p><a href="https://www.clarku.edu/faculty/profiles/hamed-alemohammad/">Hamed Alemohammad, Clark University</a> </p><p><a href="https://www.linkedin.com/in/hamedalemo/">Hamed Alemohammad on LinkedIn</a></p><p><a href="https://twitter.com/HamedAlemo">Hamed Alemohammad on X</a></p><p><a href="https://github.com/hamedalemo">Hamed Alemohammad on GitHub</a></p><p><a href="https://arxiv.org/abs/2310.18660">Foundation Models for Generalist Geospatial Artificial Intelligence</a></p><p><a href="https://huggingface.co/ibm-nasa-geospatial/Prithvi-100M">Prithvi-100M on Hugging Face</a></p><p><a href="https://huggingface.co/ibm-nasa-geospatial/Prithvi-100M-multi-temporal-crop-classification">HLS Multi-Temporal Crop Classification Model on Hugging Face</a></p><p><strong>Resources for Computer Vision Teams:</strong></p><p><a href="https://www.linkedin.com/in/hdcouture/">LinkedIn</a> – Connect with Heather.</p><p><a href="https://pixelscientia.com/newsletter/">Computer Vision Insights Newsletter</a> – A biweekly newsletter to help bring the latest machine learning and computer vision research to applications in people and planetary health.</p><p><a href="http://pixelscientia.com/services/strategy-session/">Computer Vision Strategy Session</a> – Not sure how to advance your computer vision project? 
Get unstuck with a clear set of next steps. Schedule a 1 hour strategy session now to advance your project.</p><p><a href="https://pixelscientia.com/services/custom-vision-model-assessment/">Custom Vision Model Assessment</a> – Foundation models are popping up everywhere – do you need one for your proprietary image dataset? Get a clear perspective on whether you can benefit from a domain-specific foundation model.</p>]]>
      </itunes:summary>
      <itunes:keywords>earth observation, machine learning, deep learning, computer vision, satellite, remote sensing</itunes:keywords>
      <itunes:explicit>No</itunes:explicit>
      <podcast:transcript url="https://share.transistor.fm/s/463d3cd0/transcript.txt" type="text/plain"/>
    </item>
    <item>
      <title>Breast Cancer Screening with Stefan Bunk and Christian Leibig from Vara</title>
      <itunes:episode>61</itunes:episode>
      <podcast:episode>61</podcast:episode>
      <itunes:title>Breast Cancer Screening with Stefan Bunk and Christian Leibig from Vara</itunes:title>
      <itunes:episodeType>full</itunes:episodeType>
      <guid isPermaLink="false">2bcbe050-8d5d-497a-8a00-d6e8b558c46d</guid>
      <link>https://pixelscientia.com/podcast/breast-cancer-screening-with-stefan-bunk-and-christian-leibig-from-vara/</link>
      <description>
        <![CDATA[<p>Could there be a future where not using AI is considered unethical? With the growing efficiency created by AI support, radiologists are able to focus on the most important aspects of their work. During this conversation, I am joined by Stefan Bunk and Christian Leibig from Vara. Tuning in, you’ll hear about the essential practice of maintaining a high standard of data quality and how AI technology is revolutionizing breast cancer detection and treatment. We discuss the relevance of German innovation and research on a global community, and the step-by-step process that Vara adopts to test and introduce AI products. You’ll also hear about Stefan and Christian’s vision for the future of Vara. Don’t miss this episode, packed with powerful insights!</p><p><br></p><p><strong>Key Points:</strong></p><ul><li>Introducing Stefan Bunk and Christian Leibig from Vara. </li><li>Vara’s mission for breast cancer outcomes in line with WHO’s Global Breast Cancer Initiative.</li><li>The role of machine learning in Vara’s technology.</li><li>What the AI technology predicts and the software that goes into this. </li><li>Why it is essential to maintain a high standard of data quality.</li><li>The relationship between images from earlier exams and current procedures. </li><li>How models are trained to manage different variations. </li><li>The relevance of German data for global application.</li><li>Why it is important to have strong processes around AI deployment. </li><li>What it means to run in Shadow Mode first and why Vara chooses to do this with AI products.</li><li>How they established the best way to integrate AI into the workflow.</li><li>The crucial role of trust in machine learning models. </li><li>Monitoring AI models constantly and creating the means to react quickly.</li><li>Where Stefan and Christian see the impact of Vara in five years. </li><li>The enduring goal of Vara: to support radiologists as they focus on the most important factors. 
</li><li>Considering the possibility that not using AI will become unethical in the future. </li></ul><p><br></p><p><strong>Quotes:</strong></p><p>“Our ambition is to find every deadly breast cancer early. Breast cancer is the most common cancer actually worldwide, one out of eight women will have it at some point in their lifetime.” —  Stefan Bunk</p><p><br></p><p>“At Vara, we want to empower health systems to systematically find more cancers much earlier and systematically downstage cancers.” — Stefan Bunk</p><p><br></p><p>“A machine learning model can actually outperform a radiologist with a single image, but nevertheless, can still benefit from taking comparisons across images into account.” —  Christian Leibig</p><p><br></p><p>“When you roll out a technology such as AI, which is the technology that is hard to understand, and you cannot always predict how it behaves in certain edge cases. We believe there must be strong processes around it wherever you will deploy your AI.” — Stefan Bunk</p><p><br></p><p><strong>Links:</strong></p><p><a href="https://www.linkedin.com/in/stefan-bunk/">Stefan Bunk on LinkedIn</a></p><p><a href="https://www.linkedin.com/in/christian-leibig-49ab57175/">Christian Leibig on LinkedIn</a> </p><p><a href="https://www.vara.ai/">Vara</a></p><p><strong>Resources for Computer Vision Teams:</strong></p><p><a href="https://www.linkedin.com/in/hdcouture/">LinkedIn</a> – Connect with Heather.</p><p><a href="https://pixelscientia.com/newsletter/">Computer Vision Insights Newsletter</a> – A biweekly newsletter to help bring the latest machine learning and computer vision research to applications in people and planetary health.</p><p><a href="http://pixelscientia.com/services/strategy-session/">Computer Vision Strategy Session</a> – Not sure how to advance your computer vision project? Get unstuck with a clear set of next steps. 
Schedule a 1 hour strategy session now to advance your project.</p><p><a href="https://pixelscientia.com/services/advisory/">Computer Vision Advisory Services</a> – Monthly advisory services to help you strategically plan your CV/ML capabilities, reduce the trial-and-error of model development, and get to market faster.</p>]]>
      </description>
      <content:encoded>
        <![CDATA[<p>Could there be a future where not using AI is considered unethical? With the growing efficiency created by AI support, radiologists are able to focus on the most important aspects of their work. During this conversation, I am joined by Stefan Bunk and Christian Leibig from Vara. Tuning in, you’ll hear about the essential practice of maintaining a high standard of data quality and how AI technology is revolutionizing breast cancer detection and treatment. We discuss the relevance of German innovation and research on a global community, and the step-by-step process that Vara adopts to test and introduce AI products. You’ll also hear about Stefan and Christian’s vision for the future of Vara. Don’t miss this episode, packed with powerful insights!</p><p><br></p><p><strong>Key Points:</strong></p><ul><li>Introducing Stefan Bunk and Christian Leibig from Vara. </li><li>Vara’s mission for breast cancer outcomes in line with WHO’s Global Breast Cancer Initiative.</li><li>The role of machine learning in Vara’s technology.</li><li>What the AI technology predicts and the software that goes into this. </li><li>Why it is essential to maintain a high standard of data quality.</li><li>The relationship between images from earlier exams and current procedures. </li><li>How models are trained to manage different variations. </li><li>The relevance of German data for global application.</li><li>Why it is important to have strong processes around AI deployment. </li><li>What it means to run in Shadow Mode first and why Vara chooses to do this with AI products.</li><li>How they established the best way to integrate AI into the workflow.</li><li>The crucial role of trust in machine learning models. </li><li>Monitoring AI models constantly and creating the means to react quickly.</li><li>Where Stefan and Christian see the impact of Vara in five years. </li><li>The enduring goal of Vara: to support radiologists as they focus on the most important factors. 
</li><li>Considering the possibility that not using AI will become unethical in the future. </li></ul><p><br></p><p><strong>Quotes:</strong></p><p>“Our ambition is to find every deadly breast cancer early. Breast cancer is the most common cancer actually worldwide, one out of eight women will have it at some point in their lifetime.” —  Stefan Bunk</p><p><br></p><p>“At Vara, we want to empower health systems to systematically find more cancers much earlier and systematically downstage cancers.” — Stefan Bunk</p><p><br></p><p>“A machine learning model can actually outperform a radiologist with a single image, but nevertheless, can still benefit from taking comparisons across images into account.” —  Christian Leibig</p><p><br></p><p>“When you roll out a technology such as AI, which is the technology that is hard to understand, and you cannot always predict how it behaves in certain edge cases. We believe there must be strong processes around it wherever you will deploy your AI.” — Stefan Bunk</p><p><br></p><p><strong>Links:</strong></p><p><a href="https://www.linkedin.com/in/stefan-bunk/">Stefan Bunk on LinkedIn</a></p><p><a href="https://www.linkedin.com/in/christian-leibig-49ab57175/">Christian Leibig on LinkedIn</a> </p><p><a href="https://www.vara.ai/">Vara</a></p><p><strong>Resources for Computer Vision Teams:</strong></p><p><a href="https://www.linkedin.com/in/hdcouture/">LinkedIn</a> – Connect with Heather.</p><p><a href="https://pixelscientia.com/newsletter/">Computer Vision Insights Newsletter</a> – A biweekly newsletter to help bring the latest machine learning and computer vision research to applications in people and planetary health.</p><p><a href="http://pixelscientia.com/services/strategy-session/">Computer Vision Strategy Session</a> – Not sure how to advance your computer vision project? Get unstuck with a clear set of next steps. 
Schedule a 1 hour strategy session now to advance your project.</p><p><a href="https://pixelscientia.com/services/advisory/">Computer Vision Advisory Services</a> – Monthly advisory services to help you strategically plan your CV/ML capabilities, reduce the trial-and-error of model development, and get to market faster.</p>]]>
      </content:encoded>
      <pubDate>Mon, 04 Dec 2023 06:00:00 -0500</pubDate>
      <author>Heather D. Couture</author>
      <enclosure url="https://2.gum.fm/op3.dev/e/pdcn.co/e/pdst.fm/e/dts.podtrac.com/redirect.mp3/media.transistor.fm/e24b46a1/6d0fc41c.mp3" length="29004992" type="audio/mpeg"/>
      <itunes:author>Heather D. Couture</itunes:author>
      <itunes:image href="https://img.transistor.fm/5icIuBBQ5HGl_VaurUi296STr_yUirdbPsJ54TC1c64/rs:fill:0:0:1/w:1400/h:1400/q:60/mb:500000/aHR0cHM6Ly9pbWct/dXBsb2FkLXByb2R1/Y3Rpb24udHJhbnNp/c3Rvci5mbS9lcGlz/b2RlLzE2MDAxMTgv/MTcwMDAwODI5MS1h/cnR3b3JrLmpwZw.jpg"/>
      <itunes:duration>1806</itunes:duration>
      <itunes:summary>
        <![CDATA[<p>Could there be a future where not using AI is considered unethical? With the growing efficiency created by AI support, radiologists are able to focus on the most important aspects of their work. During this conversation, I am joined by Stefan Bunk and Christian Leibig from Vara. Tuning in, you’ll hear about the essential practice of maintaining a high standard of data quality and how AI technology is revolutionizing breast cancer detection and treatment. We discuss the relevance of German innovation and research on a global community, and the step-by-step process that Vara adopts to test and introduce AI products. You’ll also hear about Stefan and Christian’s vision for the future of Vara. Don’t miss this episode, packed with powerful insights!</p><p><br></p><p><strong>Key Points:</strong></p><ul><li>Introducing Stefan Bunk and Christian Leibig from Vara. </li><li>Vara’s mission for breast cancer outcomes in line with WHO’s Global Breast Cancer Initiative.</li><li>The role of machine learning in Vara’s technology.</li><li>What the AI technology predicts and the software that goes into this. </li><li>Why it is essential to maintain a high standard of data quality.</li><li>The relationship between images from earlier exams and current procedures. </li><li>How models are trained to manage different variations. </li><li>The relevance of German data for global application.</li><li>Why it is important to have strong processes around AI deployment. </li><li>What it means to run in Shadow Mode first and why Vara chooses to do this with AI products.</li><li>How they established the best way to integrate AI into the workflow.</li><li>The crucial role of trust in machine learning models. </li><li>Monitoring AI models constantly and creating the means to react quickly.</li><li>Where Stefan and Christian see the impact of Vara in five years. </li><li>The enduring goal of Vara: to support radiologists as they focus on the most important factors. 
</li><li>Considering the possibility that not using AI will become unethical in the future. </li></ul><p><br></p><p><strong>Quotes:</strong></p><p>“Our ambition is to find every deadly breast cancer early. Breast cancer is the most common cancer actually worldwide, one out of eight women will have it at some point in their lifetime.” —  Stefan Bunk</p><p><br></p><p>“At Vara, we want to empower health systems to systematically find more cancers much earlier and systematically downstage cancers.” — Stefan Bunk</p><p><br></p><p>“A machine learning model can actually outperform a radiologist with a single image, but nevertheless, can still benefit from taking comparisons across images into account.” —  Christian Leibig</p><p><br></p><p>“When you roll out a technology such as AI, which is the technology that is hard to understand, and you cannot always predict how it behaves in certain edge cases. We believe there must be strong processes around it wherever you will deploy your AI.” — Stefan Bunk</p><p><br></p><p><strong>Links:</strong></p><p><a href="https://www.linkedin.com/in/stefan-bunk/">Stefan Bunk on LinkedIn</a></p><p><a href="https://www.linkedin.com/in/christian-leibig-49ab57175/">Christian Leibig on LinkedIn</a> </p><p><a href="https://www.vara.ai/">Vara</a></p><p><strong>Resources for Computer Vision Teams:</strong></p><p><a href="https://www.linkedin.com/in/hdcouture/">LinkedIn</a> – Connect with Heather.</p><p><a href="https://pixelscientia.com/newsletter/">Computer Vision Insights Newsletter</a> – A biweekly newsletter to help bring the latest machine learning and computer vision research to applications in people and planetary health.</p><p><a href="http://pixelscientia.com/services/strategy-session/">Computer Vision Strategy Session</a> – Not sure how to advance your computer vision project? Get unstuck with a clear set of next steps. 
Schedule a 1 hour strategy session now to advance your project.</p><p><a href="https://pixelscientia.com/services/advisory/">Computer Vision Advisory Services</a> – Monthly advisory services to help you strategically plan your CV/ML capabilities, reduce the trial-and-error of model development, and get to market faster.</p>]]>
      </itunes:summary>
      <itunes:keywords>machine learning, deep learning, medical imaging, radiology, mammogram, breast cancer</itunes:keywords>
      <itunes:explicit>No</itunes:explicit>
      <podcast:transcript url="https://share.transistor.fm/s/e24b46a1/transcript.txt" type="text/plain"/>
    </item>
    <item>
      <title>Eliminating Verification Hurdles with Vyacheslav Zholudev from Sumsub</title>
      <itunes:episode>60</itunes:episode>
      <podcast:episode>60</podcast:episode>
      <itunes:title>Eliminating Verification Hurdles with Vyacheslav Zholudev from Sumsub</itunes:title>
      <itunes:episodeType>full</itunes:episodeType>
      <guid isPermaLink="false">d4efe32a-5401-4fdd-92e4-a754a0440bf3</guid>
      <link>https://pixelscientia.com/podcast/eliminating-verification-hurdles-with-vyacheslav-zholudev-from-sumsub/</link>
      <description>
        <![CDATA[<p>Ready to dive deep into the world of online security and identity verification? In this episode, I sit down with Vyacheslav Zholudev from Sumsub to discuss user verification, fraud detection, and the role of machine learning in ensuring the safety of digital interactions. Vyacheslav is the co-founder and CTO of Sumsub, an online verification platform that secures the whole user journey using innovative transaction monitoring and fraud prevention solutions.</p><p>In our conversation, Vyacheslav discusses the evolution of Sumsub, its role in online identity verification, and the challenges posed by deepfakes in the digital world. We explore the cat-and-mouse game against the rising threat of deepfakes, the pivotal role of machine learning in user verification, the challenges posed by generative AI advancements, the ethical considerations in combating biases, and much more. Tune in and discover the future of user verification with Vyacheslav Zholudev from Sumsub!</p><p><br></p><p><strong>Key Points:</strong></p><ul><li>Vyacheslav's background and the journey that led to the creation of Sumsub.</li><li>Evolution of Sumsub from an anti-Photoshop project to a user verification platform.</li><li>Hear why online user verification is vital for implementing digital features.</li><li>Sumsub’s overall mission and shifting from physical to online identity verification.</li><li>The crucial role of machine learning in Sumsub’s user verification technology.</li><li>How the latest generative AI advancements impact user verification efficiency.</li><li>Implications of deepfakes on society and their potential to facilitate fraud.</li><li>Approaches and techniques used by Sumsub to detect and combat deepfakes.</li><li>Continuous learning and adaptation in the rapidly evolving field of machine learning.</li><li>Ethical concerns and potential biases in models trained for fraud detection.</li><li>Monitoring and preparing to address potential bias in Sumsub’s 
models.</li><li>Advice for leaders of AI-powered startups and Sumsub's future goals.</li></ul><p><br></p><p><strong>Quotes:</strong></p><p>“Basically, [machine learning] is everywhere. I can’t imagine that our company could exist without machine learning and different algorithms in this area.” — Vyacheslav Zholudev</p><p><br></p><p>“It was really expensive and difficult to create a deepfake that looks realistic. Nowadays, you can do it with a click of a button on your smartphone. That became a problem [for user verification].” — Vyacheslav Zholudev</p><p><br></p><p>“We have a very strong machine learning team and we’re really focusing a lot nowadays on fighting those deepfakes, trying new and new ways how we can protect ourselves and our customers against them.” — Vyacheslav Zholudev</p><p><br></p><p>“Think like a hacker and don’t compromise security. Don’t think that some things won’t be revealed, they will.” — Vyacheslav Zholudev</p><p><br></p><p><strong>Links:</strong></p><p><a href="https://www.linkedin.com/in/det413/">Vyacheslav Zholudev on LinkedIn</a></p><p><a href="https://sumsub.com">Sumsub</a></p><p><strong>Resources for Computer Vision Teams:</strong></p><p><a href="https://www.linkedin.com/in/hdcouture/">LinkedIn</a> – Connect with Heather.</p><p><a href="https://pixelscientia.com/newsletter/">Computer Vision Insights Newsletter</a> – A biweekly newsletter to help bring the latest machine learning and computer vision research to applications in people and planetary health.</p><p><a href="http://pixelscientia.com/services/strategy-session/">Computer Vision Strategy Session</a> – Not sure how to advance your computer vision project? Get unstuck with a clear set of next steps. 
Schedule a 1 hour strategy session now to advance your project.</p><p><a href="https://pixelscientia.com/services/advisory/">Computer Vision Advisory Services</a> – Monthly advisory services to help you strategically plan your CV/ML capabilities, reduce the trial-and-error of model development, and get to market faster.</p>]]>
      </description>
      <content:encoded>
        <![CDATA[<p>Ready to dive deep into the world of online security and identity verification? In this episode, I sit down with Vyacheslav Zholudev from Sumsub to discuss user verification, fraud detection, and the role of machine learning in ensuring the safety of digital interactions. Vyacheslav is the co-founder and CTO of Sumsub, an online verification platform that secures the whole user journey using innovative transaction monitoring and fraud prevention solutions.</p><p>In our conversation, Vyacheslav discusses the evolution of Sumsub, its role in online identity verification, and the challenges posed by deepfakes in the digital world. We explore the cat-and-mouse game against the rising threat of deepfakes, the pivotal role of machine learning in user verification, the challenges posed by generative AI advancements, the ethical considerations in combating biases, and much more. Tune in and discover the future of user verification with Vyacheslav Zholudev from Sumsub!</p><p><br></p><p><strong>Key Points:</strong></p><ul><li>Vyacheslav's background and the journey that led to the creation of Sumsub.</li><li>Evolution of Sumsub from an anti-Photoshop project to a user verification platform.</li><li>Hear why online user verification is vital for implementing digital features.</li><li>Sumsub’s overall mission and shifting from physical to online identity verification.</li><li>The crucial role of machine learning in Sumsub’s user verification technology.</li><li>How the latest generative AI advancements impact user verification efficiency.</li><li>Implications of deepfakes on society and their potential to facilitate fraud.</li><li>Approaches and techniques used by Sumsub to detect and combat deepfakes.</li><li>Continuous learning and adaptation in the rapidly evolving field of machine learning.</li><li>Ethical concerns and potential biases in models trained for fraud detection.</li><li>Monitoring and preparing to address potential bias in Sumsub’s 
models.</li><li>Advice for leaders of AI-powered startups and Sumsub's future goals.</li></ul><p><br></p><p><strong>Quotes:</strong></p><p>“Basically, [machine learning] is everywhere. I can’t imagine that our company could exist without machine learning and different algorithms in this area.” — Vyacheslav Zholudev</p><p><br></p><p>“It was really expensive and difficult to create a deepfake that looks realistic. Nowadays, you can do it with a click of a button on your smartphone. That became a problem [for user verification].” — Vyacheslav Zholudev</p><p><br></p><p>“We have a very strong machine learning team and we’re really focusing a lot nowadays on fighting those deepfakes, trying new and new ways how we can protect ourselves and our customers against them.” — Vyacheslav Zholudev</p><p><br></p><p>“Think like a hacker and don’t compromise security. Don’t think that some things won’t be revealed, they will.” — Vyacheslav Zholudev</p><p><br></p><p><strong>Links:</strong></p><p><a href="https://www.linkedin.com/in/det413/">Vyacheslav Zholudev on LinkedIn</a></p><p><a href="https://sumsub.com">Sumsub</a></p><p><strong>Resources for Computer Vision Teams:</strong></p><p><a href="https://www.linkedin.com/in/hdcouture/">LinkedIn</a> – Connect with Heather.</p><p><a href="https://pixelscientia.com/newsletter/">Computer Vision Insights Newsletter</a> – A biweekly newsletter to help bring the latest machine learning and computer vision research to applications in people and planetary health.</p><p><a href="http://pixelscientia.com/services/strategy-session/">Computer Vision Strategy Session</a> – Not sure how to advance your computer vision project? Get unstuck with a clear set of next steps. 
Schedule a 1 hour strategy session now to advance your project.</p><p><a href="https://pixelscientia.com/services/advisory/">Computer Vision Advisory Services</a> – Monthly advisory services to help you strategically plan your CV/ML capabilities, reduce the trial-and-error of model development, and get to market faster.</p>]]>
      </content:encoded>
      <pubDate>Mon, 27 Nov 2023 06:00:00 -0500</pubDate>
      <author>Heather D. Couture</author>
      <enclosure url="https://2.gum.fm/op3.dev/e/pdcn.co/e/pdst.fm/e/dts.podtrac.com/redirect.mp3/media.transistor.fm/d7c32193/ada24b1e.mp3" length="23656457" type="audio/mpeg"/>
      <itunes:author>Heather D. Couture</itunes:author>
      <itunes:image href="https://img.transistor.fm/zQqfyZa-8oq3e5rX2l_vfNqX-hcolaMeP8AfJsVRkpc/rs:fill:0:0:1/w:1400/h:1400/q:60/mb:500000/aHR0cHM6Ly9pbWct/dXBsb2FkLXByb2R1/Y3Rpb24udHJhbnNp/c3Rvci5mbS9lcGlz/b2RlLzE2MDAxMTMv/MTcwMDAwODA1Ni1h/cnR3b3JrLmpwZw.jpg"/>
      <itunes:duration>1473</itunes:duration>
      <itunes:summary>
        <![CDATA[<p>Ready to dive deep into the world of online security and identity verification? In this episode, I sit down with Vyacheslav Zholudev from Sumsub to discuss user verification, fraud detection, and the role of machine learning in ensuring the safety of digital interactions. Vyacheslav is the co-founder and CTO of Sumsub, an online verification platform that secures the whole user journey using innovative transaction monitoring and fraud prevention solutions.</p><p>In our conversation, Vyacheslav discusses the evolution of Sumsub, its role in online identity verification, and the challenges posed by deepfakes in the digital world. We explore the cat-and-mouse game against the rising threat of deepfakes, the pivotal role of machine learning in user verification, the challenges posed by generative AI advancements, the ethical considerations in combating biases, and much more. Tune in and discover the future of user verification with Vyacheslav Zholudev from Sumsub!</p><p><br></p><p><strong>Key Points:</strong></p><ul><li>Vyacheslav's background and the journey that led to the creation of Sumsub.</li><li>Evolution of Sumsub from an anti-Photoshop project to a user verification platform.</li><li>Hear why online user verification is vital for implementing digital features.</li><li>Sumsub’s overall mission and shifting from physical to online identity verification.</li><li>The crucial role of machine learning in Sumsub’s user verification technology.</li><li>How the latest generative AI advancements impact user verification efficiency.</li><li>Implications of deepfakes on society and their potential to facilitate fraud.</li><li>Approaches and techniques used by Sumsub to detect and combat deepfakes.</li><li>Continuous learning and adaptation in the rapidly evolving field of machine learning.</li><li>Ethical concerns and potential biases in models trained for fraud detection.</li><li>Monitoring and preparing to address potential bias in Sumsub’s 
models.</li><li>Advice for leaders of AI-powered startups and Sumsub's future goals.</li></ul><p><br></p><p><strong>Quotes:</strong></p><p>“Basically, [machine learning] is everywhere. I can’t imagine that our company could exist without machine learning and different algorithms in this area.” — Vyacheslav Zholudev</p><p><br></p><p>“It was really expensive and difficult to create a deepfake that looks realistic. Nowadays, you can do it with a click of a button on your smartphone. That became a problem [for user verification].” — Vyacheslav Zholudev</p><p><br></p><p>“We have a very strong machine learning team and we’re really focusing a lot nowadays on fighting those deepfakes, trying new and new ways how we can protect ourselves and our customers against them.” — Vyacheslav Zholudev</p><p><br></p><p>“Think like a hacker and don’t compromise security. Don’t think that some things won’t be revealed, they will.” — Vyacheslav Zholudev</p><p><br></p><p><strong>Links:</strong></p><p><a href="https://www.linkedin.com/in/det413/">Vyacheslav Zholudev on LinkedIn</a></p><p><a href="https://sumsub.com">Sumsub</a></p><p><strong>Resources for Computer Vision Teams:</strong></p><p><a href="https://www.linkedin.com/in/hdcouture/">LinkedIn</a> – Connect with Heather.</p><p><a href="https://pixelscientia.com/newsletter/">Computer Vision Insights Newsletter</a> – A biweekly newsletter to help bring the latest machine learning and computer vision research to applications in people and planetary health.</p><p><a href="http://pixelscientia.com/services/strategy-session/">Computer Vision Strategy Session</a> – Not sure how to advance your computer vision project? Get unstuck with a clear set of next steps. 
Schedule a 1 hour strategy session now to advance your project.</p><p><a href="https://pixelscientia.com/services/advisory/">Computer Vision Advisory Services</a> – Monthly advisory services to help you strategically plan your CV/ML capabilities, reduce the trial-and-error of model development, and get to market faster.</p>]]>
      </itunes:summary>
      <itunes:keywords>machine learning, deep learning, deepfakes, online security, identity verification, fraud detection</itunes:keywords>
      <itunes:explicit>No</itunes:explicit>
      <podcast:transcript url="https://share.transistor.fm/s/d7c32193/transcript.txt" type="text/plain"/>
    </item>
    <item>
      <title>Weather Intelligence with Gard Hauge from StormGeo</title>
      <itunes:episode>59</itunes:episode>
      <podcast:episode>59</podcast:episode>
      <itunes:title>Weather Intelligence with Gard Hauge from StormGeo</itunes:title>
      <itunes:episodeType>full</itunes:episodeType>
      <guid isPermaLink="false">48252529-a06d-4ab6-9692-d6fc53723863</guid>
      <link>https://pixelscientia.com/podcast/weather-intelligence-with-gard-hauge-from-stormgeo/</link>
      <description>
        <![CDATA[<p>During this conversation, I am joined by guest Gard Hauge, CTO of StormGeo, a weather prediction specialist and researcher with a background in software. We discuss Gard’s extensive research and its application at StormGeo, his historical experience with the company’s evolving relationship with machine learning, how weather and markets are related, and more. Touching on challenges in the field, my guest reveals the growing volume of data he deals with on a daily basis. We discuss the fundamental role of data engineering alongside machine learning, the key role of third-party data, and more before Gard shares his perspective on the future of StormGeo. He also delves into his experience to give informed advice to listeners. Join in to hear more from this thought leader today! </p><p><br></p><p><strong>Key Points:</strong></p><ul><li>Gard Hauge, CTO of StormGeo, shares his background and introduction to StormGeo. </li><li>The topic of his Ph.D. research: weather prediction.</li><li>Products and services offered by StormGeo beyond weather prediction. </li><li>The evolving role of machine learning at StormGeo and how it is integrated today. </li><li>How weather and markets are related. </li><li>Investments StormGeo is making into generative AI.</li><li>Gard’s relationship with data collection and processing.</li><li>The biggest challenges he faces especially in relation to the volume of data.</li><li>The fundamental role of data engineering in building successful algorithms. </li><li>The key role of third-party data.</li><li>Advice for other AI startup leaders. </li><li>Gard’s predictions for the future of StormGeo. </li></ul><p><br></p><p><strong>Quotes:</strong></p><p>“The data pipeline is something we put a lot of effort into developing over the last decade. 
Actually, streamlining how we actually process and make this data available in products and services is key.” — Gard Hauge</p><p><br></p><p>“The amount of data we face is typically doubled every two years. So, we need to be quite smart in handling and processing data and what we're actually archiving for machine learning.” — Gard Hauge</p><p><br></p><p>“Everybody talks about AI and machine learning. But our experience is that 80% to 85% of the work is basically data engineering, and that's a key fundament if you want to build successful algorithms.” — Gard Hauge</p><p><br></p><p><strong>Links:</strong></p><p><a href="https://www.linkedin.com/in/gard-hauge-4180883/">Gard Hauge on LinkedIn</a></p><p><a href="https://www.stormgeo.com/">StormGeo</a></p><p><a href="https://linkedin.com/company/stormgeo/">StormGeo on LinkedIn</a></p><p><strong>Resources for Computer Vision Teams:</strong></p><p><a href="https://www.linkedin.com/in/hdcouture/">LinkedIn</a> – Connect with Heather.</p><p><a href="https://pixelscientia.com/newsletter/">Computer Vision Insights Newsletter</a> – A biweekly newsletter to help bring the latest machine learning and computer vision research to applications in people and planetary health.</p><p><a href="http://pixelscientia.com/services/strategy-session/">Computer Vision Strategy Session</a> – Not sure how to advance your computer vision project? Get unstuck with a clear set of next steps. Schedule a 1 hour strategy session now to advance your project.</p><p><a href="https://pixelscientia.com/services/advisory/">Computer Vision Advisory Services</a> – Monthly advisory services to help you strategically plan your CV/ML capabilities, reduce the trial-and-error of model development, and get to market faster.</p>]]>
      </description>
      <content:encoded>
        <![CDATA[<p>During this conversation, I am joined by guest Gard Hauge, CTO of StormGeo, a weather prediction specialist and researcher with a background in software. We discuss Gard’s extensive research and its application at StormGeo, his historical experience with the company’s evolving relationship with machine learning, how weather and markets are related, and more. Touching on challenges in the field, my guest reveals the growing volume of data he deals with on a daily basis. We discuss the fundamental role of data engineering alongside machine learning, the key role of third-party data, and more before Gard shares his perspective on the future of StormGeo. He also delves into his experience to give informed advice to listeners. Join in to hear more from this thought leader today! </p><p><br></p><p><strong>Key Points:</strong></p><ul><li>Gard Hauge, CTO of StormGeo, shares his background and introduction to StormGeo. </li><li>The topic of his Ph.D. research: weather prediction.</li><li>Products and services offered by StormGeo beyond weather prediction. </li><li>The evolving role of machine learning at StormGeo and how it is integrated today. </li><li>How weather and markets are related. </li><li>Investments StormGeo is making into generative AI.</li><li>Gard’s relationship with data collection and processing.</li><li>The biggest challenges he faces especially in relation to the volume of data.</li><li>The fundamental role of data engineering in building successful algorithms. </li><li>The key role of third-party data.</li><li>Advice for other AI startup leaders. </li><li>Gard’s predictions for the future of StormGeo. </li></ul><p><br></p><p><strong>Quotes:</strong></p><p>“The data pipeline is something we put a lot of effort into developing over the last decade. 
Actually, streamlining how we actually process and make this data available in products and services is key.” — Gard Hauge</p><p><br></p><p>“The amount of data we face is typically doubled every two years. So, we need to be quite smart in handling and processing data and what we're actually archiving for machine learning.” — Gard Hauge</p><p><br></p><p>“Everybody talks about AI and machine learning. But our experience is that 80% to 85% of the work is basically data engineering, and that's a key fundament if you want to build successful algorithms.” — Gard Hauge</p><p><br></p><p><strong>Links:</strong></p><p><a href="https://www.linkedin.com/in/gard-hauge-4180883/">Gard Hauge on LinkedIn</a></p><p><a href="https://www.stormgeo.com/">StormGeo</a></p><p><a href="https://linkedin.com/company/stormgeo/">StormGeo on LinkedIn</a></p><p><strong>Resources for Computer Vision Teams:</strong></p><p><a href="https://www.linkedin.com/in/hdcouture/">LinkedIn</a> – Connect with Heather.</p><p><a href="https://pixelscientia.com/newsletter/">Computer Vision Insights Newsletter</a> – A biweekly newsletter to help bring the latest machine learning and computer vision research to applications in people and planetary health.</p><p><a href="http://pixelscientia.com/services/strategy-session/">Computer Vision Strategy Session</a> – Not sure how to advance your computer vision project? Get unstuck with a clear set of next steps. Schedule a 1 hour strategy session now to advance your project.</p><p><a href="https://pixelscientia.com/services/advisory/">Computer Vision Advisory Services</a> – Monthly advisory services to help you strategically plan your CV/ML capabilities, reduce the trial-and-error of model development, and get to market faster.</p>]]>
      </content:encoded>
      <pubDate>Mon, 20 Nov 2023 06:00:00 -0500</pubDate>
      <author>Heather D. Couture</author>
      <enclosure url="https://2.gum.fm/op3.dev/e/pdcn.co/e/pdst.fm/e/dts.podtrac.com/redirect.mp3/media.transistor.fm/037c086a/4646c88f.mp3" length="41959580" type="audio/mpeg"/>
      <itunes:author>Heather D. Couture</itunes:author>
      <itunes:image href="https://img.transistor.fm/fYWS1yl70mUrSfiDo2uP1qwGfyccn1OncaHXhNesjW0/rs:fill:0:0:1/w:1400/h:1400/q:60/mb:500000/aHR0cHM6Ly9pbWct/dXBsb2FkLXByb2R1/Y3Rpb24udHJhbnNp/c3Rvci5mbS9lcGlz/b2RlLzE1OTQ3OTgv/MTY5OTczNjkzNC1h/cnR3b3JrLmpwZw.jpg"/>
      <itunes:duration>1743</itunes:duration>
      <itunes:summary>
        <![CDATA[<p>During this conversation, I am joined by guest Gard Hauge, CTO of StormGeo, a weather prediction specialist and researcher with a background in software. We discuss Gard’s extensive research and its application at StormGeo, his historical experience with the company’s evolving relationship with machine learning, how weather and markets are related, and more. Touching on challenges in the field, my guest reveals the growing volume of data he deals with on a daily basis. We discuss the fundamental role of data engineering alongside machine learning, the key role of third-party data, and more before Gard shares his perspective on the future of StormGeo. He also delves into his experience to give informed advice to listeners. Join in to hear more from this thought leader today! </p><p><br></p><p><strong>Key Points:</strong></p><ul><li>Gard Hauge, CTO of StormGeo, shares his background and introduction to StormGeo. </li><li>The topic of his Ph.D. research: weather prediction.</li><li>Products and services offered by StormGeo beyond weather prediction. </li><li>The evolving role of machine learning at StormGeo and how it is integrated today. </li><li>How weather and markets are related. </li><li>Investments StormGeo is making into generative AI.</li><li>Gard’s relationship with data collection and processing.</li><li>The biggest challenges he faces especially in relation to the volume of data.</li><li>The fundamental role of data engineering in building successful algorithms. </li><li>The key role of third-party data.</li><li>Advice for other AI startup leaders. </li><li>Gard’s predictions for the future of StormGeo. </li></ul><p><br></p><p><strong>Quotes:</strong></p><p>“The data pipeline is something we put a lot of effort into developing over the last decade. 
Actually, streamlining how we actually process and make this data available in products and services is key.” — Gard Hauge</p><p><br></p><p>“The amount of data we face is typically doubled every two years. So, we need to be quite smart in handling and processing data and what we're actually archiving for machine learning.” — Gard Hauge</p><p><br></p><p>“Everybody talks about AI and machine learning. But our experience is that 80% to 85% of the work is basically data engineering, and that's a key fundament if you want to build successful algorithms.” — Gard Hauge</p><p><br></p><p><strong>Links:</strong></p><p><a href="https://www.linkedin.com/in/gard-hauge-4180883/">Gard Hauge on LinkedIn</a></p><p><a href="https://www.stormgeo.com/">StormGeo</a></p><p><a href="https://linkedin.com/company/stormgeo/">StormGeo on LinkedIn</a></p><p><strong>Resources for Computer Vision Teams:</strong></p><p><a href="https://www.linkedin.com/in/hdcouture/">LinkedIn</a> – Connect with Heather.</p><p><a href="https://pixelscientia.com/newsletter/">Computer Vision Insights Newsletter</a> – A biweekly newsletter to help bring the latest machine learning and computer vision research to applications in people and planetary health.</p><p><a href="http://pixelscientia.com/services/strategy-session/">Computer Vision Strategy Session</a> – Not sure how to advance your computer vision project? Get unstuck with a clear set of next steps. Schedule a 1 hour strategy session now to advance your project.</p><p><a href="https://pixelscientia.com/services/advisory/">Computer Vision Advisory Services</a> – Monthly advisory services to help you strategically plan your CV/ML capabilities, reduce the trial-and-error of model development, and get to market faster.</p>]]>
      </itunes:summary>
      <itunes:keywords>machine learning, deep learning, weather intelligence</itunes:keywords>
      <itunes:explicit>No</itunes:explicit>
      <podcast:transcript url="https://share.transistor.fm/s/037c086a/transcript.txt" type="text/plain"/>
    </item>
    <item>
      <title>Managing Eye Diseases with Carlos Ciller from RetinAI</title>
      <itunes:episode>58</itunes:episode>
      <podcast:episode>58</podcast:episode>
      <itunes:title>Managing Eye Diseases with Carlos Ciller from RetinAI</itunes:title>
      <itunes:episodeType>full</itunes:episodeType>
      <guid isPermaLink="false">bf7b964e-b0ee-489f-983c-61ed91a8e746</guid>
      <link>https://pixelscientia.com/podcast/managing-eye-diseases-with-carlos-ciller-from-retinai/</link>
      <description>
        <![CDATA[<p>Using AI for medical data analysis for eye diseases has the potential to significantly improve diagnosis, treatment, and patient care in ophthalmology. In this episode, I sit down with Carlos Ciller, Co-Founder and CEO of RetinAI, to discuss the impact of AI in the field of healthcare, specifically in the context of RetinAI, a company focused on using AI for medical data analysis for eye diseases. In our conversation, we unpack the world of mission-driven impact in healthcare as Carlos shares his journey from engineer to innovator.</p><p>Uncover how RetinAI's flagship product, "Discovery," is revolutionizing healthcare with AI-powered medical image and data management. Explore the diverse data sources and AI models used, the importance of model robustness, and the influence of regulatory processes. Carlos also discusses the benefits of publishing research and the potential of generative AI, and he offers valuable advice for AI startup leaders. Finally, learn about RetinAI's vision for the future, including its expansion into new therapeutic areas and the pursuit of digital precision medicine. 
Tune in to uncover the incredible impact of AI in healthcare and RetinAI's pivotal role in this transformation with Carlos Ciller!</p><p><br></p><p><strong>Key Points:</strong></p><ul><li>Carlos’ professional background and his passion for meaningful healthcare solutions.</li><li>RetinAI’s mission and the range of healthcare products it provides.</li><li>The types of data and AI models that RetinAI leverages.</li><li>Challenges of dealing with diverse data sources, devices, and patient characteristics.</li><li>Ensuring model performance and accuracy in the long term.</li><li>Frozen with fixed weights versus continuous learning models.</li><li>Discover how regulatory processes influence AI development.</li><li>He explains the benefits of publishing research for the development process.</li><li>Explore the potential of generative AI in healthcare.</li><li>Learn the importance of ‘wrapping’ the technology with the right product.</li><li>Focusing on the customer, starting small, and letting the market define the product.</li><li>His vision for RetinAI's impact in the next three to five years.</li></ul><p><br></p><p><strong>Quotes:</strong></p><p>“[RetinAI is] a software company. We are software that is enabling the right decisions sooner in healthcare, and that, of course, goes a long way.” — Carlos Ciller</p><p><br></p><p>“One of the secret sauces of the company is that around 40% to 50% of the team has actually a very strong academic training, specifically in the ophthalmology space.” — Carlos Ciller</p><p><br></p><p>“Quality is the most important aspect. 
If you work on quality [data], you will create stronger models.” — Carlos Ciller</p><p><br></p><p>“I think the regulations that we have today, and some of the guidelines and support material provided by regulatory agencies and some of the leaders in academic space are precisely [there] to help you not commit the same mistakes that others committed in the past.” — Carlos Ciller</p><p><br></p><p>“I think that it's important to share your research, and you can still make a good company out of sharing your own research, and letting others build on top of what you are building.” — Carlos Ciller</p><p><br></p><p><strong>Links:</strong></p><p><a href="https://carlosciller.weebly.com">Carlos Ciller</a></p><p><a href="https://www.linkedin.com/in/carlosciller/">Carlos Ciller on LinkedIn</a></p><p><a href="https://twitter.com/carlos_ciller">Carlos Ciller on X</a></p><p><a href="https://www.retinai.com">RetinAI</a></p><p><strong>Resources for Computer Vision Teams:</strong></p><p><a href="https://www.linkedin.com/in/hdcouture/">LinkedIn</a> – Connect with Heather.</p><p><a href="https://pixelscientia.com/newsletter/">Computer Vision Insights Newsletter</a> – A biweekly newsletter to help bring the latest machine learning and computer vision research to applications in people and planetary health.</p><p><a href="http://pixelscientia.com/services/strategy-session/">Computer Vision Strategy Session</a> – Not sure how to advance your computer vision project? Get unstuck with a clear set of next steps. Schedule a 1 hour strategy session now to advance your project.</p><p><a href="https://pixelscientia.com/services/advisory/">Computer Vision Advisory Services</a> – Monthly advisory services to help you strategically plan your CV/ML capabilities, reduce the trial-and-error of model development, and get to market faster.</p>]]>
      </description>
      <content:encoded>
        <![CDATA[<p>Using AI for medical data analysis for eye diseases has the potential to significantly improve diagnosis, treatment, and patient care in ophthalmology. In this episode, I sit down with Carlos Ciller, Co-Founder and CEO of RetinAI, to discuss the impact of AI in the field of healthcare, specifically in the context of RetinAI, a company focused on using AI for medical data analysis for eye diseases. In our conversation, we unpack the world of mission-driven impact in healthcare as Carlos shares his journey from engineer to innovator.</p><p>Uncover how RetinAI's flagship product, "Discovery," is revolutionizing healthcare with AI-powered medical image and data management. Explore the diverse data sources and AI models used, the importance of model robustness, and the influence of regulatory processes. Carlos also discusses the benefits of publishing research and the potential of generative AI, and he offers valuable advice for AI startup leaders. Finally, learn about RetinAI's vision for the future, including its expansion into new therapeutic areas and the pursuit of digital precision medicine. 
Tune in to uncover the incredible impact of AI in healthcare and RetinAI's pivotal role in this transformation with Carlos Ciller!</p><p><br></p><p><strong>Key Points:</strong></p><ul><li>Carlos’ professional background and his passion for meaningful healthcare solutions.</li><li>RetinAI’s mission and the range of healthcare products it provides.</li><li>The types of data and AI models that RetinAI leverages.</li><li>Challenges of dealing with diverse data sources, devices, and patient characteristics.</li><li>Ensuring model performance and accuracy in the long term.</li><li>Frozen with fixed weights versus continuous learning models.</li><li>Discover how regulatory processes influence AI development.</li><li>He explains the benefits of publishing research for the development process.</li><li>Explore the potential of generative AI in healthcare.</li><li>Learn the importance of ‘wrapping’ the technology with the right product.</li><li>Focusing on the customer, starting small, and letting the market define the product.</li><li>His vision for RetinAI's impact in the next three to five years.</li></ul><p><br></p><p><strong>Quotes:</strong></p><p>“[RetinAI is] a software company. We are software that is enabling the right decisions sooner in healthcare, and that, of course, goes a long way.” — Carlos Ciller</p><p><br></p><p>“One of the secret sauces of the company is that around 40% to 50% of the team has actually a very strong academic training, specifically in the ophthalmology space.” — Carlos Ciller</p><p><br></p><p>“Quality is the most important aspect. 
If you work on quality [data], you will create stronger models.” — Carlos Ciller</p><p><br></p><p>“I think the regulations that we have today, and some of the guidelines and support material provided by regulatory agencies and some of the leaders in academic space are precisely [there] to help you not commit the same mistakes that others committed in the past.” — Carlos Ciller</p><p><br></p><p>“I think that it's important to share your research, and you can still make a good company out of sharing your own research, and letting others build on top of what you are building.” — Carlos Ciller</p><p><br></p><p><strong>Links:</strong></p><p><a href="https://carlosciller.weebly.com">Carlos Ciller</a></p><p><a href="https://www.linkedin.com/in/carlosciller/">Carlos Ciller on LinkedIn</a></p><p><a href="https://twitter.com/carlos_ciller">Carlos Ciller on X</a></p><p><a href="https://www.retinai.com">RetinAI</a></p><p><strong>Resources for Computer Vision Teams:</strong></p><p><a href="https://www.linkedin.com/in/hdcouture/">LinkedIn</a> – Connect with Heather.</p><p><a href="https://pixelscientia.com/newsletter/">Computer Vision Insights Newsletter</a> – A biweekly newsletter to help bring the latest machine learning and computer vision research to applications in people and planetary health.</p><p><a href="http://pixelscientia.com/services/strategy-session/">Computer Vision Strategy Session</a> – Not sure how to advance your computer vision project? Get unstuck with a clear set of next steps. Schedule a 1 hour strategy session now to advance your project.</p><p><a href="https://pixelscientia.com/services/advisory/">Computer Vision Advisory Services</a> – Monthly advisory services to help you strategically plan your CV/ML capabilities, reduce the trial-and-error of model development, and get to market faster.</p>]]>
      </content:encoded>
      <pubDate>Mon, 13 Nov 2023 06:00:00 -0500</pubDate>
      <author>Heather D. Couture</author>
      <enclosure url="https://2.gum.fm/op3.dev/e/pdcn.co/e/pdst.fm/e/dts.podtrac.com/redirect.mp3/media.transistor.fm/14bb3163/66d07441.mp3" length="35730111" type="audio/mpeg"/>
      <itunes:author>Heather D. Couture</itunes:author>
      <itunes:image href="https://img.transistor.fm/jdb40eL7jlWslrTbmzzmjFJPss9ySL0yccFHzIwDu_0/rs:fill:0:0:1/w:1400/h:1400/q:60/mb:500000/aHR0cHM6Ly9pbWct/dXBsb2FkLXByb2R1/Y3Rpb24udHJhbnNp/c3Rvci5mbS9lcGlz/b2RlLzE1MzU2NTQv/MTY5NjU0MDIzMC1h/cnR3b3JrLmpwZw.jpg"/>
      <itunes:duration>1484</itunes:duration>
      <itunes:summary>
        <![CDATA[<p>Using AI for medical data analysis for eye diseases has the potential to significantly improve diagnosis, treatment, and patient care in ophthalmology. In this episode, I sit down with Carlos Ciller, Co-Founder and CEO of RetinAI, to discuss the impact of AI in the field of healthcare, specifically in the context of RetinAI, a company focused on using AI for medical data analysis for eye diseases. In our conversation, we unpack the world of mission-driven impact in healthcare as Carlos shares his journey from engineer to innovator.</p><p>Uncover how RetinAI's flagship product, "Discovery," is revolutionizing healthcare with AI-powered medical image and data management. Explore the diverse data sources and AI models used, the importance of model robustness, and the influence of regulatory processes. Carlos also discusses the benefits of publishing research and the potential of generative AI, and he offers valuable advice for AI startup leaders. Finally, learn about RetinAI's vision for the future, including its expansion into new therapeutic areas and the pursuit of digital precision medicine. 
Tune in to uncover the incredible impact of AI in healthcare and RetinAI's pivotal role in this transformation with Carlos Ciller!</p><p><br></p><p><strong>Key Points:</strong></p><ul><li>Carlos’ professional background and his passion for meaningful healthcare solutions.</li><li>RetinAI’s mission and the range of healthcare products it provides.</li><li>The types of data and AI models that RetinAI leverages.</li><li>Challenges of dealing with diverse data sources, devices, and patient characteristics.</li><li>Ensuring model performance and accuracy in the long term.</li><li>Frozen with fixed weights versus continuous learning models.</li><li>Discover how regulatory processes influence AI development.</li><li>He explains the benefits of publishing research for the development process.</li><li>Explore the potential of generative AI in healthcare.</li><li>Learn the importance of ‘wrapping’ the technology with the right product.</li><li>Focusing on the customer, starting small, and letting the market define the product.</li><li>His vision for RetinAI's impact in the next three to five years.</li></ul><p><br></p><p><strong>Quotes:</strong></p><p>“[RetinAI is] a software company. We are software that is enabling the right decisions sooner in healthcare, and that, of course, goes a long way.” — Carlos Ciller</p><p><br></p><p>“One of the secret sauces of the company is that around 40% to 50% of the team has actually a very strong academic training, specifically in the ophthalmology space.” — Carlos Ciller</p><p><br></p><p>“Quality is the most important aspect. 
If you work on quality [data], you will create stronger models.” — Carlos Ciller</p><p><br></p><p>“I think the regulations that we have today, and some of the guidelines and support material provided by regulatory agencies and some of the leaders in academic space are precisely [there] to help you not commit the same mistakes that others committed in the past.” — Carlos Ciller</p><p><br></p><p>“I think that it's important to share your research, and you can still make a good company out of sharing your own research, and letting others build on top of what you are building.” — Carlos Ciller</p><p><br></p><p><strong>Links:</strong></p><p><a href="https://carlosciller.weebly.com">Carlos Ciller</a></p><p><a href="https://www.linkedin.com/in/carlosciller/">Carlos Ciller on LinkedIn</a></p><p><a href="https://twitter.com/carlos_ciller">Carlos Ciller on X</a></p><p><a href="https://www.retinai.com">RetinAI</a></p><p><strong>Resources for Computer Vision Teams:</strong></p><p><a href="https://www.linkedin.com/in/hdcouture/">LinkedIn</a> – Connect with Heather.</p><p><a href="https://pixelscientia.com/newsletter/">Computer Vision Insights Newsletter</a> – A biweekly newsletter to help bring the latest machine learning and computer vision research to applications in people and planetary health.</p><p><a href="http://pixelscientia.com/services/strategy-session/">Computer Vision Strategy Session</a> – Not sure how to advance your computer vision project? Get unstuck with a clear set of next steps. Schedule a 1 hour strategy session now to advance your project.</p><p><a href="https://pixelscientia.com/services/advisory/">Computer Vision Advisory Services</a> – Monthly advisory services to help you strategically plan your CV/ML capabilities, reduce the trial-and-error of model development, and get to market faster.</p>]]>
      </itunes:summary>
      <itunes:keywords>machine learning, healthcare, ophthalmology</itunes:keywords>
      <itunes:explicit>No</itunes:explicit>
      <podcast:transcript url="https://share.transistor.fm/s/14bb3163/transcript.txt" type="text/plain"/>
    </item>
    <item>
      <title>Intelligent Agriculture with Praveen Pankajakshan from Cropin</title>
      <itunes:episode>57</itunes:episode>
      <podcast:episode>57</podcast:episode>
      <itunes:title>Intelligent Agriculture with Praveen Pankajakshan from Cropin</itunes:title>
      <itunes:episodeType>full</itunes:episodeType>
      <guid isPermaLink="false">0d308c10-31ee-4b18-8a2f-2bc5c5e7f5aa</guid>
      <link>https://pixelscientia.com/podcast/intelligent-agriculture-with-praveen-pankajakshan-from-cropin/</link>
      <description>
        <![CDATA[<p>AI in agriculture offers numerous benefits and plays a crucial role in addressing the challenges of feeding a growing global population while minimizing environmental impact. Joining me today is Praveen Pankajakshan, Vice President of Data Science and AI at Cropin, to talk about intelligent agriculture and how Cropin is paving the way forward for sustainable agricultural practices. Cropin is a technology company that offers services and solutions for the agriculture industry, including AI/ML models, data processing, and applications to digitize farm operations and enable data-driven decision-making.</p><p>In our conversation, Praveen discusses various aspects of how machine learning and AI are being applied to agriculture to improve farming practices, sustainability, and climate resilience. Discover how Cropin employs AI to identify crops, monitor crop health, and provide timely advice to farmers on planting and harvest timings. He highlights the importance of combining satellite data with ground-level insights and the rigorous data annotation process, emphasizing the significance of field visits. We also delve into crop-cutting experiments for machine learning, overcoming out-of-distribution (OOD) problems, how climate change makes training models difficult, and much more! 
Tune in and discover how Cropin is revolutionizing farming and sustainable agriculture with Praveen Pankajakshan!</p><p><br></p><p><strong>Key Points:</strong></p><ul><li>Praveen's background and how he got into agriculture and machine learning.</li><li>Cropin's mission and its digitization and monitoring services for farmers.</li><li>Discover the role of machine learning in enhancing agricultural tasks.</li><li>Learn about the types of data Cropin leverages for crop digitization.</li><li>Why ground data and field visits are essential for the validation process.</li><li>Insights into the challenges of working with agriculture data.</li><li>Developing and deploying machine learning products for agriculture.</li><li>Maintaining machine learning advancements around seasons.</li><li>Agritech innovations that Praveen finds the most interesting.</li><li>Words of advice for leaders of AI-powered startups: stay grounded.</li><li>The future impact of Cropin on sustainable agricultural practices.</li></ul><p><br></p><p><strong>Quotes:</strong></p><p>“There are many areas where machine learning has actually worked wonders. And I would say that because we have been digitizing farmlands now for over a decade.” — Praveen Pankajakshan</p><p><br></p><p>“One of the major challenges of working with satellite data is it definitely needs ground data [for validation].” — Praveen Pankajakshan</p><p><br></p><p>“Agriculture is very complex, and it's also very nice to work with because it's also profoundly impactful.” — Praveen Pankajakshan</p><p><br></p><p>“[In terms of development], we have to ensure that first we have some baseline models ready for deployment, for inferencing. 
And development happens almost simultaneously.” — Praveen Pankajakshan</p><p><br></p><p>“[I] insist more on data quality rather than the quantity of the data.” — Praveen Pankajakshan</p><p><br></p><p><strong>Links:</strong></p><p><a href="https://www.linkedin.com/in/praveenpankaj/">Praveen Pankajakshan on LinkedIn</a></p><p><a href="mailto:praveen@cropin.com">Praveen Pankajakshan Email</a></p><p><a href="https://www.cropin.com">Cropin</a></p><p><strong>Resources for Computer Vision Teams:</strong></p><p><a href="https://www.linkedin.com/in/hdcouture/">LinkedIn</a> – Connect with Heather.</p><p><a href="https://pixelscientia.com/newsletter/">Computer Vision Insights Newsletter</a> – A biweekly newsletter to help bring the latest machine learning and computer vision research to applications in people and planetary health.</p><p><a href="http://pixelscientia.com/services/strategy-session/">Computer Vision Strategy Session</a> – Not sure how to advance your computer vision project? Get unstuck with a clear set of next steps. Schedule a 1 hour strategy session now to advance your project.</p><p><a href="https://pixelscientia.com/services/advisory/">Computer Vision Advisory Services</a> – Monthly advisory services to help you strategically plan your CV/ML capabilities, reduce the trial-and-error of model development, and get to market faster.</p>]]>
      </description>
      <content:encoded>
        <![CDATA[<p>AI in agriculture offers numerous benefits and plays a crucial role in addressing the challenges of feeding a growing global population while minimizing environmental impact. Joining me today is Praveen Pankajakshan, Vice President of Data Science and AI at Cropin, to talk about intelligent agriculture and how Cropin is paving the way forward for sustainable agricultural practices. Cropin is a technology company that offers services and solutions for the agriculture industry, including AI/ML models, data processing, and applications to digitize farm operations and enable data-driven decision-making.</p><p>In our conversation, Praveen discusses various aspects of how machine learning and AI are being applied to agriculture to improve farming practices, sustainability, and climate resilience. Discover how Cropin employs AI to identify crops, monitor crop health, and provide timely advice to farmers on planting and harvest timings. He highlights the importance of combining satellite data with ground-level insights and the rigorous data annotation process, emphasizing the significance of field visits. We also delve into crop-cutting experiments for machine learning, overcoming out-of-distribution (OOD) problems, how climate change makes training models difficult, and much more! 
Tune in and discover how Cropin is revolutionizing farming and sustainable agriculture with Praveen Pankajakshan!</p><p><br></p><p><strong>Key Points:</strong></p><ul><li>Praveen's background and how he got into agriculture and machine learning.</li><li>Cropin's mission and its digitization and monitoring services for farmers.</li><li>Discover the role of machine learning in enhancing agricultural tasks.</li><li>Learn about the types of data Cropin leverages for crop digitization.</li><li>Why ground data and field visits are essential for the validation process.</li><li>Insights into the challenges of working with agriculture data.</li><li>Developing and deploying machine learning products for agriculture.</li><li>Maintaining machine learning advancements around seasons.</li><li>Agritech innovations that Praveen finds the most interesting.</li><li>Words of advice for leaders of AI-powered startups: stay grounded.</li><li>The future impact of Cropin on sustainable agricultural practices.</li></ul><p><br></p><p><strong>Quotes:</strong></p><p>“There are many areas where machine learning has actually worked wonders. And I would say that because we have been digitizing farmlands now for over a decade.” — Praveen Pankajakshan</p><p><br></p><p>“One of the major challenges of working with satellite data is it definitely needs ground data [for validation].” — Praveen Pankajakshan</p><p><br></p><p>“Agriculture is very complex, and it's also very nice to work with because it's also profoundly impactful.” — Praveen Pankajakshan</p><p><br></p><p>“[In terms of development], we have to ensure that first we have some baseline models ready for deployment, for inferencing. 
And development happens almost simultaneously.” — Praveen Pankajakshan</p><p><br></p><p>“[I] insist more on data quality rather than the quantity of the data.” — Praveen Pankajakshan</p><p><br></p><p><strong>Links:</strong></p><p><a href="https://www.linkedin.com/in/praveenpankaj/">Praveen Pankajakshan on LinkedIn</a></p><p><a href="mailto:praveen@cropin.com">Praveen Pankajakshan Email</a></p><p><a href="https://www.cropin.com">Cropin</a></p><p><strong>Resources for Computer Vision Teams:</strong></p><p><a href="https://www.linkedin.com/in/hdcouture/">LinkedIn</a> – Connect with Heather.</p><p><a href="https://pixelscientia.com/newsletter/">Computer Vision Insights Newsletter</a> – A biweekly newsletter to help bring the latest machine learning and computer vision research to applications in people and planetary health.</p><p><a href="http://pixelscientia.com/services/strategy-session/">Computer Vision Strategy Session</a> – Not sure how to advance your computer vision project? Get unstuck with a clear set of next steps. Schedule a 1 hour strategy session now to advance your project.</p><p><a href="https://pixelscientia.com/services/advisory/">Computer Vision Advisory Services</a> – Monthly advisory services to help you strategically plan your CV/ML capabilities, reduce the trial-and-error of model development, and get to market faster.</p>]]>
      </content:encoded>
      <pubDate>Mon, 06 Nov 2023 06:00:00 -0500</pubDate>
      <author>Heather D. Couture</author>
      <enclosure url="https://2.gum.fm/op3.dev/e/pdcn.co/e/pdst.fm/e/dts.podtrac.com/redirect.mp3/media.transistor.fm/4788bbd9/276aaadd.mp3" length="47600847" type="audio/mpeg"/>
      <itunes:author>Heather D. Couture</itunes:author>
      <itunes:image href="https://img.transistor.fm/p-oJzaufSLGssiBac-JkK1XgydLAsMRVrWXtyEPkH_k/rs:fill:0:0:1/w:1400/h:1400/q:60/mb:500000/aHR0cHM6Ly9pbWct/dXBsb2FkLXByb2R1/Y3Rpb24udHJhbnNp/c3Rvci5mbS9lcGlz/b2RlLzE1MzU2NDUv/MTY5NjUzOTc2My1h/cnR3b3JrLmpwZw.jpg"/>
      <itunes:duration>1977</itunes:duration>
      <itunes:summary>
        <![CDATA[<p>AI in agriculture offers numerous benefits and plays a crucial role in addressing the challenges of feeding a growing global population while minimizing environmental impact. Joining me today is Praveen Pankajakshan, Vice President of Data Science and AI at Cropin, to talk about intelligent agriculture and how Cropin is paving the way forward for sustainable agricultural practices. Cropin is a technology company that offers services and solutions for the agriculture industry, including AI/ML models, data processing, and applications to digitize farm operations and enable data-driven decision-making.</p><p>In our conversation, Praveen discusses various aspects of how machine learning and AI are being applied to agriculture to improve farming practices, sustainability, and climate resilience. Discover how Cropin employs AI to identify crops, monitor crop health, and provide timely advice to farmers on planting and harvest timings. He highlights the importance of combining satellite data with ground-level insights and the rigorous data annotation process, emphasizing the significance of field visits. We also delve into crop-cutting experiments for machine learning, overcoming out-of-distribution (OOD) problems, how climate change makes training models difficult, and much more! 
Tune in and discover how Cropin is revolutionizing farming and sustainable agriculture with Praveen Pankajakshan!</p><p><br></p><p><strong>Key Points:</strong></p><ul><li>Praveen's background and how he got into agriculture and machine learning.</li><li>Cropin's mission and its digitization and monitoring services for farmers.</li><li>Discover the role of machine learning in enhancing agricultural tasks.</li><li>Learn about the types of data Cropin leverages for crop digitization.</li><li>Why ground data and field visits are essential for the validation process.</li><li>Insights into the challenges of working with agriculture data.</li><li>Developing and deploying machine learning products for agriculture.</li><li>Maintaining machine learning advancements around seasons.</li><li>Agritech innovations that Praveen finds the most interesting.</li><li>Words of advice for leaders of AI-powered startups: stay grounded.</li><li>The future impact of Cropin on sustainable agricultural practices.</li></ul><p><br></p><p><strong>Quotes:</strong></p><p>“There are many areas where machine learning has actually worked wonders. And I would say that because we have been digitizing farmlands now for over a decade.” — Praveen Pankajakshan</p><p><br></p><p>“One of the major challenges of working with satellite data is it definitely needs ground data [for validation].” — Praveen Pankajakshan</p><p><br></p><p>“Agriculture is very complex, and it's also very nice to work with because it's also profoundly impactful.” — Praveen Pankajakshan</p><p><br></p><p>“[In terms of development], we have to ensure that first we have some baseline models ready for deployment, for inferencing. 
And development happens almost simultaneously.” — Praveen Pankajakshan</p><p><br></p><p>“[I] insist more on data quality rather than the quantity of the data.” — Praveen Pankajakshan</p><p><br></p><p><strong>Links:</strong></p><p><a href="https://www.linkedin.com/in/praveenpankaj/">Praveen Pankajakshan on LinkedIn</a></p><p><a href="mailto:praveen@cropin.com">Praveen Pankajakshan Email</a></p><p><a href="https://www.cropin.com">Cropin</a></p><p><strong>Resources for Computer Vision Teams:</strong></p><p><a href="https://www.linkedin.com/in/hdcouture/">LinkedIn</a> – Connect with Heather.</p><p><a href="https://pixelscientia.com/newsletter/">Computer Vision Insights Newsletter</a> – A biweekly newsletter to help bring the latest machine learning and computer vision research to applications in people and planetary health.</p><p><a href="http://pixelscientia.com/services/strategy-session/">Computer Vision Strategy Session</a> – Not sure how to advance your computer vision project? Get unstuck with a clear set of next steps. Schedule a 1 hour strategy session now to advance your project.</p><p><a href="https://pixelscientia.com/services/advisory/">Computer Vision Advisory Services</a> – Monthly advisory services to help you strategically plan your CV/ML capabilities, reduce the trial-and-error of model development, and get to market faster.</p>]]>
      </itunes:summary>
      <itunes:keywords>machine learning, deep learning, agriculture, precision agriculture</itunes:keywords>
      <itunes:explicit>false</itunes:explicit>
      <podcast:transcript url="https://share.transistor.fm/s/4788bbd9/transcript.txt" type="text/plain"/>
    </item>
    <item>
      <title>Preventing Heart Attacks and Strokes with Todd Villines from Elucid</title>
      <itunes:episode>56</itunes:episode>
      <podcast:episode>56</podcast:episode>
      <itunes:title>Preventing Heart Attacks and Strokes with Todd Villines from Elucid</itunes:title>
      <itunes:episodeType>full</itunes:episodeType>
      <guid isPermaLink="false">1705898b-7ec8-4462-b9d1-15c8efc4d003</guid>
      <link>https://pixelscientia.com/podcast/preventing-heart-attacks-and-strokes-with-todd-villines-from-elucid/</link>
      <description>
        <![CDATA[<p>Leveraging AI to prevent heart attacks and strokes offers a significant opportunity to transform healthcare and make it more productive, personalized, and accessible. Joining me today is Todd Villines, the Chief Medical Officer of Elucid, a pioneering medical technology company at the forefront of AI-powered heart attack and stroke prevention.</p><p>We discuss how Elucid's FDA-cleared product uses cutting-edge AI to analyze and characterize arterial plaque through CT scans and the innovative aspects of Elucid's algorithms. We explore the role of machine learning in Elucid's technology, from identifying risky plaques to using fractional flow reserve derived from CT without invasive procedures.</p><p>Tuning in, you’ll learn about the importance of high-quality data annotation and the rigorous validation process required to ensure accuracy across various scenarios and demographics. We also unpack the company's approach to annotating data, avoiding bias, and using diverse data sets. 
To discover how Elucid is making strides in cardiovascular health and paving the way for a healthier future, don’t miss this conversation with Todd Villines!</p><p><br></p><p><strong>Key Points:</strong></p><ul><li>Todd’s professional background and why he joined the team at Elucid.</li><li>Elucid’s mission and how it leverages AI for medical technology.</li><li>The role of machine learning in Elucid's technology.</li><li>Developing a fractional flow reserve derived from CT analysis.</li><li>Why data annotation is crucial for training the Elucid models.</li><li>The importance of validation and how Elucid ensures the accuracy of its product.</li><li>Challenges and limitations of working with CT images.</li><li>How the company’s technology integrates into the existing clinical workflow.</li><li>Metrics used to assess the impact of Elucid's technology.</li><li>Intelligent design, diverse datasets, and avoiding bias in AI development.</li><li>Discover Elucid's future outlook and its plans to expand.</li></ul><p><br></p><p><strong>Quotes:</strong></p><p><br></p><p>“We’ve created proprietary algorithms that were trained by histology using traditional image processing techniques to recognize different types of plaque based on histology.” — Todd Villines</p><p><br></p><p>“In the field of medical imaging, using supervised machine learning and annotated data of very high quality and also generalizable to the clinical use case of your technology is vitally important.” — Todd Villines</p><p>“You can’t just go out and pick the very highest image quality to train your models or you’re going to end up with a very overfitted model that doesn’t generalize to the clinical use case.” — Todd Villines</p><p><br></p><p>“Just like any good clinical study, designing your AI technology is probably the most important thing. 
Spend the time upfront to get it right.” — Todd Villines</p><p><br></p><p><strong>Links:</strong></p><p><a href="https://www.linkedin.com/in/drtoddvillines/">Todd Villines on LinkedIn</a></p><p><a href="https://twitter.com/ToddVillinesMD">Todd Villines on X</a></p><p><a href="https://elucid.com">Elucid</a></p><p><strong>Resources for Computer Vision Teams:</strong></p><p><a href="https://www.linkedin.com/in/hdcouture/">LinkedIn</a> – Connect with Heather.</p><p><a href="https://pixelscientia.com/newsletter/">Computer Vision Insights Newsletter</a> – A biweekly newsletter to help bring the latest machine learning and computer vision research to applications in people and planetary health.</p><p><a href="http://pixelscientia.com/services/strategy-session/">Computer Vision Strategy Session</a> – Not sure how to advance your computer vision project? Get unstuck with a clear set of next steps. Schedule a 1 hour strategy session now to advance your project.</p><p><a href="https://pixelscientia.com/services/advisory/">Computer Vision Advisory Services</a> – Monthly advisory services to help you strategically plan your CV/ML capabilities, reduce the trial-and-error of model development, and get to market faster.</p>]]>
      </description>
      <content:encoded>
        <![CDATA[<p>Leveraging AI to prevent heart attacks and strokes offers a significant opportunity to transform healthcare and make it more productive, personalized, and accessible. Joining me today is Todd Villines, the Chief Medical Officer of Elucid, a pioneering medical technology company at the forefront of AI-powered heart attack and stroke prevention.</p><p>We discuss how Elucid's FDA-cleared product uses cutting-edge AI to analyze and characterize arterial plaque through CT scans and the innovative aspects of Elucid's algorithms. We explore the role of machine learning in Elucid's technology, from identifying risky plaques to using fractional flow reserve derived from CT without invasive procedures.</p><p>Tuning in, you’ll learn about the importance of high-quality data annotation and the rigorous validation process required to ensure accuracy across various scenarios and demographics. We also unpack the company's approach to annotating data, avoiding bias, and using diverse data sets. 
To discover how Elucid is making strides in cardiovascular health and paving the way for a healthier future, don’t miss this conversation with Todd Villines!</p><p><br></p><p><strong>Key Points:</strong></p><ul><li>Todd’s professional background and why he joined the team at Elucid.</li><li>Elucid’s mission and how it leverages AI for medical technology.</li><li>The role of machine learning in Elucid's technology.</li><li>Developing a fractional flow reserve derived from CT analysis.</li><li>Why data annotation is crucial for training the Elucid models.</li><li>The importance of validation and how Elucid ensures the accuracy of its product.</li><li>Challenges and limitations of working with CT images.</li><li>How the company’s technology integrates into the existing clinical workflow.</li><li>Metrics used to assess the impact of Elucid's technology.</li><li>Intelligent design, diverse datasets, and avoiding bias in AI development.</li><li>Discover Elucid's future outlook and its plans to expand.</li></ul><p><br></p><p><strong>Quotes:</strong></p><p><br></p><p>“We’ve created proprietary algorithms that were trained by histology using traditional image processing techniques to recognize different types of plaque based on histology.” — Todd Villines</p><p><br></p><p>“In the field of medical imaging, using supervised machine learning and annotated data of very high quality and also generalizable to the clinical use case of your technology is vitally important.” — Todd Villines</p><p>“You can’t just go out and pick the very highest image quality to train your models or you’re going to end up with a very overfitted model that doesn’t generalize to the clinical use case.” — Todd Villines</p><p><br></p><p>“Just like any good clinical study, designing your AI technology is probably the most important thing. 
Spend the time upfront to get it right.” — Todd Villines</p><p><br></p><p><strong>Links:</strong></p><p><a href="https://www.linkedin.com/in/drtoddvillines/">Todd Villines on LinkedIn</a></p><p><a href="https://twitter.com/ToddVillinesMD">Todd Villines on X</a></p><p><a href="https://elucid.com">Elucid</a></p><p><strong>Resources for Computer Vision Teams:</strong></p><p><a href="https://www.linkedin.com/in/hdcouture/">LinkedIn</a> – Connect with Heather.</p><p><a href="https://pixelscientia.com/newsletter/">Computer Vision Insights Newsletter</a> – A biweekly newsletter to help bring the latest machine learning and computer vision research to applications in people and planetary health.</p><p><a href="http://pixelscientia.com/services/strategy-session/">Computer Vision Strategy Session</a> – Not sure how to advance your computer vision project? Get unstuck with a clear set of next steps. Schedule a 1 hour strategy session now to advance your project.</p><p><a href="https://pixelscientia.com/services/advisory/">Computer Vision Advisory Services</a> – Monthly advisory services to help you strategically plan your CV/ML capabilities, reduce the trial-and-error of model development, and get to market faster.</p>]]>
      </content:encoded>
      <pubDate>Mon, 30 Oct 2023 06:00:00 -0400</pubDate>
      <author>Heather D. Couture</author>
      <enclosure url="https://2.gum.fm/op3.dev/e/pdcn.co/e/pdst.fm/e/dts.podtrac.com/redirect.mp3/media.transistor.fm/751e78b2/755daae2.mp3" length="33375311" type="audio/mpeg"/>
      <itunes:author>Heather D. Couture</itunes:author>
      <itunes:image href="https://img.transistor.fm/iV5xY_o50OAj2_1s2i8o02_VeQkBL9NBu4FOvciwQQY/rs:fill:0:0:1/w:1400/h:1400/q:60/mb:500000/aHR0cHM6Ly9pbWct/dXBsb2FkLXByb2R1/Y3Rpb24udHJhbnNp/c3Rvci5mbS9lcGlz/b2RlLzE1MzU2MzMv/MTY5NjUzOTE0Ny1h/cnR3b3JrLmpwZw.jpg"/>
      <itunes:duration>1386</itunes:duration>
      <itunes:summary>
        <![CDATA[<p>Leveraging AI to prevent heart attacks and strokes offers a significant opportunity to transform healthcare and make it more productive, personalized, and accessible. Joining me today is Todd Villines, the Chief Medical Officer of Elucid, a pioneering medical technology company at the forefront of AI-powered heart attack and stroke prevention.</p><p>We discuss how Elucid's FDA-cleared product uses cutting-edge AI to analyze and characterize arterial plaque through CT scans and the innovative aspects of Elucid's algorithms. We explore the role of machine learning in Elucid's technology, from identifying risky plaques to using fractional flow reserve derived from CT without invasive procedures.</p><p>Tuning in, you’ll learn about the importance of high-quality data annotation and the rigorous validation process required to ensure accuracy across various scenarios and demographics. We also unpack the company's approach to annotating data, avoiding bias, and using diverse data sets. 
To discover how Elucid is making strides in cardiovascular health and paving the way for a healthier future, don’t miss this conversation with Todd Villines!</p><p><br></p><p><strong>Key Points:</strong></p><ul><li>Todd’s professional background and why he joined the team at Elucid.</li><li>Elucid’s mission and how it leverages AI for medical technology.</li><li>The role of machine learning in Elucid's technology.</li><li>Developing a fractional flow reserve derived from CT analysis.</li><li>Why data annotation is crucial for training the Elucid models.</li><li>The importance of validation and how Elucid ensures the accuracy of its product.</li><li>Challenges and limitations of working with CT images.</li><li>How the company’s technology integrates into the existing clinical workflow.</li><li>Metrics used to assess the impact of Elucid's technology.</li><li>Intelligent design, diverse datasets, and avoiding bias in AI development.</li><li>Discover Elucid's future outlook and its plans to expand.</li></ul><p><br></p><p><strong>Quotes:</strong></p><p><br></p><p>“We’ve created proprietary algorithms that were trained by histology using traditional image processing techniques to recognize different types of plaque based on histology.” — Todd Villines</p><p><br></p><p>“In the field of medical imaging, using supervised machine learning and annotated data of very high quality and also generalizable to the clinical use case of your technology is vitally important.” — Todd Villines</p><p>“You can’t just go out and pick the very highest image quality to train your models or you’re going to end up with a very overfitted model that doesn’t generalize to the clinical use case.” — Todd Villines</p><p><br></p><p>“Just like any good clinical study, designing your AI technology is probably the most important thing. 
Spend the time upfront to get it right.” — Todd Villines</p><p><br></p><p><strong>Links:</strong></p><p><a href="https://www.linkedin.com/in/drtoddvillines/">Todd Villines on LinkedIn</a></p><p><a href="https://twitter.com/ToddVillinesMD">Todd Villines on X</a></p><p><a href="https://elucid.com">Elucid</a></p><p><strong>Resources for Computer Vision Teams:</strong></p><p><a href="https://www.linkedin.com/in/hdcouture/">LinkedIn</a> – Connect with Heather.</p><p><a href="https://pixelscientia.com/newsletter/">Computer Vision Insights Newsletter</a> – A biweekly newsletter to help bring the latest machine learning and computer vision research to applications in people and planetary health.</p><p><a href="http://pixelscientia.com/services/strategy-session/">Computer Vision Strategy Session</a> – Not sure how to advance your computer vision project? Get unstuck with a clear set of next steps. Schedule a 1 hour strategy session now to advance your project.</p><p><a href="https://pixelscientia.com/services/advisory/">Computer Vision Advisory Services</a> – Monthly advisory services to help you strategically plan your CV/ML capabilities, reduce the trial-and-error of model development, and get to market faster.</p>]]>
      </itunes:summary>
      <itunes:keywords>machine learning, deep learning, healthcare, heart attack, stroke</itunes:keywords>
      <itunes:explicit>false</itunes:explicit>
      <podcast:transcript url="https://share.transistor.fm/s/751e78b2/transcript.txt" type="text/plain"/>
    </item>
    <item>
      <title>Measuring the Natural World with Kevin Lang from Agerpoint</title>
      <itunes:episode>55</itunes:episode>
      <podcast:episode>55</podcast:episode>
      <itunes:title>Measuring the Natural World with Kevin Lang from Agerpoint</itunes:title>
      <itunes:episodeType>full</itunes:episodeType>
      <guid isPermaLink="false">99f4f0f1-04c6-44b1-86bf-8b7e1db1c031</guid>
      <link>https://pixelscientia.com/podcast/measuring-the-natural-world-with-kevin-lang-from-agerpoint/</link>
      <description>
        <![CDATA[<p>Measuring plant health is essential for various applications, such as agriculture and conservation biology. Being able to measure plant health effectively enhances resource use, mitigation measures, and sustainable use of ecosystems in a rapidly changing world. But how is this done?</p><p>In this episode, I am joined by Kevin Lang, the CEO and president of Agerpoint, who shares insights into their cutting-edge solutions for measuring and monitoring plants. Agerpoint is a company that provides tools and solutions for measuring and monitoring plants to gather accurate data related to forests and crops. Their spatial intelligence platform is designed to unlock valuable insights for sustainable food systems and climate solutions.</p><p>In our conversation, Kevin explains how Agerpoint harnesses the power of AI, machine learning, and 3D modeling to enhance crop management, reduce resource inputs, and promote regenerative farming practices. Learn about the innovative ways Agerpoint is leveraging existing technology, such as smartphones, to make their products more accessible and affordable. Kevin also delves into the types of data Agerpoint uses, the validation process, the challenges of analyzing plant health, and much more. 
Tune in as we explore the fusion of technology and nature and how it's helping shape a more sustainable and efficient future with Kevin Lang from Agerpoint!</p><p><br></p><p><strong>Key Points:</strong></p><ul><li>Kevin’s professional background and the road to Agerpoint.</li><li>Details about Agerpoint and what the company specializes in.</li><li>How machine learning forms the core of Agerpoint’s technology.</li><li>The range of data modalities Agerpoint uses for its technology.</li><li>Data challenges and insights into the validation process.</li><li>Bridging the gap between technology and biology.</li><li>Agerpoint's approach to recruiting top talent.</li><li>Measuring the impact of Agerpoint’s technology.</li><li>Essential advice for AI startups: align with your investors, board, and team.</li><li>What to expect from Agerpoint in the future.</li></ul><p><br></p><p><strong>Quotes:</strong></p><p>“The combination of the point cloud and the machine learning and automation and then putting this all together in a cloud-based system, where we can fuse these data layers together, is unique.” — Kevin Lang</p><p><br></p><p>“Machine learning really plays a critical role across multiple processes and products in our business, and it’s really the core of the Agerpoint platform.” — Kevin Lang</p><p><br></p><p>“Validation is just as much of a scientific challenge as it is a change management and communication challenge with your clients.” — Kevin Lang</p><p><br></p><p>“The impact [of our product] is about access and affordability.” — Kevin Lang</p><p><br></p><p>“We are building a company and a capability that we believe represents the next wave of digital agriculture and forestry.” — Kevin Lang</p><p><br></p><p><strong>Links:</strong></p><p><a href="https://www.linkedin.com/in/kevinblang/">Kevin Lang on LinkedIn</a></p><p><a href="https://twitter.com/kvnlang">Kevin Lang on X</a></p><p><a href="https://www.agerpoint.com">Agerpoint</a></p><p><a 
href="https://www.agerpoint.com/products/#capture-anchor">Agerpoint Capture iOS App</a></p><p><a href="https://www.agerpoint.com/blog/introducing-know-your-carbon">Know Your Carbon</a></p><p><strong>Resources for Computer Vision Teams:</strong></p><p><a href="https://www.linkedin.com/in/hdcouture/">LinkedIn</a> – Connect with Heather.</p><p><a href="https://pixelscientia.com/newsletter/">Computer Vision Insights Newsletter</a> – A biweekly newsletter to help bring the latest machine learning and computer vision research to applications in people and planetary health.</p><p><a href="http://pixelscientia.com/services/strategy-session/">Computer Vision Strategy Session</a> – Not sure how to advance your computer vision project? Get unstuck with a clear set of next steps. Schedule a 1 hour strategy session now to advance your project.</p><p><a href="https://pixelscientia.com/services/advisory/">Computer Vision Advisory Services</a> – Monthly advisory services to help you strategically plan your CV/ML capabilities, reduce the trial-and-error of model development, and get to market faster.</p>]]>
      </description>
      <content:encoded>
        <![CDATA[<p>Measuring plant health is essential for various applications, such as agriculture and conservation biology. Being able to measure plant health effectively enhances resource use, mitigation measures, and sustainable use of ecosystems in a rapidly changing world. But how is this done?</p><p>In this episode, I am joined by Kevin Lang, the CEO and president of Agerpoint, who shares insights into their cutting-edge solutions for measuring and monitoring plants. Agerpoint is a company that provides tools and solutions for measuring and monitoring plants to gather accurate data related to forests and crops. Their spatial intelligence platform is designed to unlock valuable insights for sustainable food systems and climate solutions.</p><p>In our conversation, Kevin explains how Agerpoint harnesses the power of AI, machine learning, and 3D modeling to enhance crop management, reduce resource inputs, and promote regenerative farming practices. Learn about the innovative ways Agerpoint is leveraging existing technology, such as smartphones, to make their products more accessible and affordable. Kevin also delves into the types of data Agerpoint uses, the validation process, the challenges of analyzing plant health, and much more. 
Tune in as we explore the fusion of technology and nature and how it's helping shape a more sustainable and efficient future with Kevin Lang from Agerpoint!</p><p><br></p><p><strong>Key Points:</strong></p><ul><li>Kevin’s professional background and the road to Agerpoint.</li><li>Details about Agerpoint and what the company specializes in.</li><li>How machine learning forms the core of Agerpoint’s technology.</li><li>The range of data modalities Agerpoint uses for its technology.</li><li>Data challenges and insights into the validation process.</li><li>Bridging the gap between technology and biology.</li><li>Agerpoint's approach to recruiting top talent.</li><li>Measuring the impact of Agerpoint’s technology.</li><li>Essential advice for AI startups: align with your investors, board, and team.</li><li>What to expect from Agerpoint in the future.</li></ul><p><br></p><p><strong>Quotes:</strong></p><p>“The combination of the point cloud and the machine learning and automation and then putting this all together in a cloud-based system, where we can fuse these data layers together, is unique.” — Kevin Lang</p><p><br></p><p>“Machine learning really plays a critical role across multiple processes and products in our business, and it’s really the core of the Agerpoint platform.” — Kevin Lang</p><p><br></p><p>“Validation is just as much of a scientific challenge as it is a change management and communication challenge with your clients.” — Kevin Lang</p><p><br></p><p>“The impact [of our product] is about access and affordability.” — Kevin Lang</p><p><br></p><p>“We are building a company and a capability that we believe represents the next wave of digital agriculture and forestry.” — Kevin Lang</p><p><br></p><p><strong>Links:</strong></p><p><a href="https://www.linkedin.com/in/kevinblang/">Kevin Lang on LinkedIn</a></p><p><a href="https://twitter.com/kvnlang">Kevin Lang on X</a></p><p><a href="https://www.agerpoint.com">Agerpoint</a></p><p><a 
href="https://www.agerpoint.com/products/#capture-anchor">Agerpoint Capture iOS App</a></p><p><a href="https://www.agerpoint.com/blog/introducing-know-your-carbon">Know Your Carbon</a></p><p><strong>Resources for Computer Vision Teams:</strong></p><p><a href="https://www.linkedin.com/in/hdcouture/">LinkedIn</a> – Connect with Heather.</p><p><a href="https://pixelscientia.com/newsletter/">Computer Vision Insights Newsletter</a> – A biweekly newsletter to help bring the latest machine learning and computer vision research to applications in people and planetary health.</p><p><a href="http://pixelscientia.com/services/strategy-session/">Computer Vision Strategy Session</a> – Not sure how to advance your computer vision project? Get unstuck with a clear set of next steps. Schedule a 1 hour strategy session now to advance your project.</p><p><a href="https://pixelscientia.com/services/advisory/">Computer Vision Advisory Services</a> – Monthly advisory services to help you strategically plan your CV/ML capabilities, reduce the trial-and-error of model development, and get to market faster.</p>]]>
      </content:encoded>
      <pubDate>Mon, 23 Oct 2023 06:00:00 -0400</pubDate>
      <author>Heather D. Couture</author>
      <enclosure url="https://2.gum.fm/op3.dev/e/pdcn.co/e/pdst.fm/e/dts.podtrac.com/redirect.mp3/media.transistor.fm/5675f72b/f554c621.mp3" length="25745834" type="audio/mpeg"/>
      <itunes:author>Heather D. Couture</itunes:author>
      <itunes:image href="https://img.transistor.fm/jkfLADSX7uxqoNY0neCkWRYY5e__ED9xBTZCW2JUE0o/rs:fill:0:0:1/w:1400/h:1400/q:60/mb:500000/aHR0cHM6Ly9pbWct/dXBsb2FkLXByb2R1/Y3Rpb24udHJhbnNp/c3Rvci5mbS9lcGlz/b2RlLzE1MzU0MjQv/MTY5NjUzODQ1Ni1h/cnR3b3JrLmpwZw.jpg"/>
      <itunes:duration>1598</itunes:duration>
      <itunes:summary>
        <![CDATA[<p>Measuring plant health is essential for various applications, such as agriculture and conservation biology. Being able to measure plant health effectively enhances resource use, mitigation measures, and sustainable use of ecosystems in a rapidly changing world. But how is this done?</p><p>In this episode, I am joined by Kevin Lang, the CEO and president of Agerpoint, who shares insights into their cutting-edge solutions for measuring and monitoring plants. Agerpoint is a company that provides tools and solutions for measuring and monitoring plants to gather accurate data related to forests and crops. Their spatial intelligence platform is designed to unlock valuable insights for sustainable food systems and climate solutions.</p><p>In our conversation, Kevin explains how Agerpoint harnesses the power of AI, machine learning, and 3D modeling to enhance crop management, reduce resource inputs, and promote regenerative farming practices. Learn about the innovative ways Agerpoint is leveraging existing technology, such as smartphones, to make their products more accessible and affordable. Kevin also delves into the types of data Agerpoint uses, the validation process, the challenges of analyzing plant health, and much more. 
Tune in as we explore the fusion of technology and nature and how it's helping shape a more sustainable and efficient future with Kevin Lang from Agerpoint!</p><p><br></p><p><strong>Key Points:</strong></p><ul><li>Kevin’s professional background and the road to Agerpoint.</li><li>Details about Agerpoint and what the company specializes in.</li><li>How machine learning forms the core of Agerpoint’s technology.</li><li>The range of data modalities Agerpoint uses for its technology.</li><li>Data challenges and insights into the validation process.</li><li>Bridging the gap between technology and biology.</li><li>Agerpoint's approach to recruiting top talent.</li><li>Measuring the impact of Agerpoint’s technology.</li><li>Essential advice for AI startups: align with your investors, board, and team.</li><li>What to expect from Agerpoint in the future.</li></ul><p><br></p><p><strong>Quotes:</strong></p><p>“The combination of the point cloud and the machine learning and automation and then putting this all together in a cloud-based system, where we can fuse these data layers together, is unique.” — Kevin Lang</p><p><br></p><p>“Machine learning really plays a critical role across multiple processes and products in our business, and it’s really the core of the Agerpoint platform.” — Kevin Lang</p><p><br></p><p>“Validation is just as much of a scientific challenge as it is a change management and communication challenge with your clients.” — Kevin Lang</p><p><br></p><p>“The impact [of our product] is about access and affordability.” — Kevin Lang</p><p><br></p><p>“We are building a company and a capability that we believe represents the next wave of digital agriculture and forestry.” — Kevin Lang</p><p><br></p><p><strong>Links:</strong></p><p><a href="https://www.linkedin.com/in/kevinblang/">Kevin Lang on LinkedIn</a></p><p><a href="https://twitter.com/kvnlang">Kevin Lang on X</a></p><p><a href="https://www.agerpoint.com">Agerpoint</a></p><p><a 
href="https://www.agerpoint.com/products/#capture-anchor">Agerpoint Capture iOS App</a></p><p><a href="https://www.agerpoint.com/blog/introducing-know-your-carbon">Know Your Carbon</a></p><p><strong>Resources for Computer Vision Teams:</strong></p><p><a href="https://www.linkedin.com/in/hdcouture/">LinkedIn</a> – Connect with Heather.</p><p><a href="https://pixelscientia.com/newsletter/">Computer Vision Insights Newsletter</a> – A biweekly newsletter to help bring the latest machine learning and computer vision research to applications in people and planetary health.</p><p><a href="http://pixelscientia.com/services/strategy-session/">Computer Vision Strategy Session</a> – Not sure how to advance your computer vision project? Get unstuck with a clear set of next steps. Schedule a 1 hour strategy session now to advance your project.</p><p><a href="https://pixelscientia.com/services/advisory/">Computer Vision Advisory Services</a> – Monthly advisory services to help you strategically plan your CV/ML capabilities, reduce the trial-and-error of model development, and get to market faster.</p>]]>
      </itunes:summary>
      <itunes:keywords>machine learning, deep learning, vegetation</itunes:keywords>
      <itunes:explicit>No</itunes:explicit>
      <podcast:transcript url="https://share.transistor.fm/s/5675f72b/transcript.txt" type="text/plain"/>
    </item>
    <item>
      <title>Reducing Radiologist Burnout with Jeff Chang from Rad AI</title>
      <itunes:episode>54</itunes:episode>
      <podcast:episode>54</podcast:episode>
      <itunes:title>Reducing Radiologist Burnout with Jeff Chang from Rad AI</itunes:title>
      <itunes:episodeType>full</itunes:episodeType>
      <guid isPermaLink="false">bab4dd36-d34a-4a17-96ed-526e57086689</guid>
      <link>https://pixelscientia.com/podcast/reducing-radiologist-burnout-with-jeff-chang-from-rad-ai/</link>
      <description>
        <![CDATA[<p>Burnout in the medical field is a significant and pervasive problem that affects healthcare professionals across various specialties and levels of experience. In this episode, I explore the impact of artificial intelligence on the field of radiology. My guest, Jeff Chang, the Co-founder and Chief Product Officer of Rad AI, shares his insights into the transformative power of AI in addressing critical challenges in radiology (like burnout) and improving patient care.</p><p>Discover Rad AI's groundbreaking products, including Rad AI Omni Impressions and Continuity, driven by powerful machine learning, and learn how they seamlessly integrate into clinical workflows. Jeff also speaks about measuring the impact of their products, exciting future applications for their products, Rad AI's vision for the future of global diagnostic care, and much more! Don't miss this deep dive into AI's potential in revolutionizing radiology and improving patient lives worldwide with Jeff Chang from Rad AI!</p><p><br></p><p><strong>Key Points:</strong></p><ul><li>Background on Jeff and the overall mission of Rad AI.</li><li>Insight into the next-generation products that Rad AI offers.</li><li>How machine learning plays a central role in their products.</li><li>Training data and the use of transformer models in Rad AI's solutions.</li><li>Certain limitations and hurdles of working with radiology reports.</li><li>Ensuring product integration with existing clinical workflow.</li><li>Exciting future applications for AI and machine learning in the space.</li><li>Ways that Rad AI measures the impact of its technology.</li><li>Essential advice for leaders of AI-powered startups.</li><li>Upcoming products from Rad AI and the company’s future impact.</li></ul><p><br></p><p><strong>Quotes:</strong></p><p><br></p><p>“Every one of our products is centered around the latest machine learning transformative work.” — Jeff Chang</p><p><br></p><p>“There is absolutely zero change to the 
existing workflow. That makes it really easy for radiologists to adopt without having to change anything that they currently do.” — Jeff Chang</p><p><br></p><p>“The more you streamline both deployment and the training process, getting radiologists or your users used to using the product, the easier it becomes and the more time you save for the radiologist.” — Jeff Chang</p><p><br></p><p>“Because of the post-processing, because of the specific training on the radiologist’s historical reports, [our models] are much more accurate than the current state of the art [LLMs] for the applications that we currently provide.” — Jeff Chang</p><p><br></p><p><strong>Links:</strong></p><p><a href="https://www.linkedin.com/in/jechang/">Jeff Chang on LinkedIn</a></p><p><a href="https://www.radai.com">Rad AI</a></p><p><a href="https://www.linkedin.com/company/radai/">Rad AI on LinkedIn</a></p><p><strong>Resources for Computer Vision Teams:</strong></p><p><a href="https://www.linkedin.com/in/hdcouture/">LinkedIn</a> – Connect with Heather.</p><p><a href="https://pixelscientia.com/newsletter/">Computer Vision Insights Newsletter</a> – A biweekly newsletter to help bring the latest machine learning and computer vision research to applications in people and planetary health.</p><p><a href="http://pixelscientia.com/services/strategy-session/">Computer Vision Strategy Session</a> – Not sure how to advance your computer vision project? Get unstuck with a clear set of next steps. Schedule a 1 hour strategy session now to advance your project.</p><p><a href="https://pixelscientia.com/services/advisory/">Computer Vision Advisory Services</a> – Monthly advisory services to help you strategically plan your CV/ML capabilities, reduce the trial-and-error of model development, and get to market faster.</p>]]>
      </description>
      <content:encoded>
        <![CDATA[<p>Burnout in the medical field is a significant and pervasive problem that affects healthcare professionals across various specialties and levels of experience. In this episode, I explore the impact of artificial intelligence on the field of radiology. My guest, Jeff Chang, the Co-founder and Chief Product Officer of Rad AI, shares his insights into the transformative power of AI in addressing critical challenges in radiology (like burnout) and improving patient care.</p><p>Discover Rad AI's groundbreaking products, including Rad AI Omni Impressions and Continuity, driven by powerful machine learning, and learn how they seamlessly integrate into clinical workflows. Jeff also speaks about measuring the impact of their products, exciting future applications for their products, Rad AI's vision for the future of global diagnostic care, and much more! Don't miss this deep dive into AI's potential in revolutionizing radiology and improving patient lives worldwide with Jeff Chang from Rad AI!</p><p><br></p><p><strong>Key Points:</strong></p><ul><li>Background on Jeff and the overall mission of Rad AI.</li><li>Insight into the next-generation products that Rad AI offers.</li><li>How machine learning plays a central role in their products.</li><li>Training data and the use of transformer models in Rad AI's solutions.</li><li>Certain limitations and hurdles of working with radiology reports.</li><li>Ensuring product integration with existing clinical workflow.</li><li>Exciting future applications for AI and machine learning in the space.</li><li>Ways that Rad AI measures the impact of its technology.</li><li>Essential advice for leaders of AI-powered startups.</li><li>Upcoming products from Rad AI and the company’s future impact.</li></ul><p><br></p><p><strong>Quotes:</strong></p><p><br></p><p>“Every one of our products is centered around the latest machine learning transformative work.” — Jeff Chang</p><p><br></p><p>“There is absolutely zero change to the 
existing workflow. That makes it really easy for radiologists to adopt without having to change anything that they currently do.” — Jeff Chang</p><p><br></p><p>“The more you streamline both deployment and the training process, getting radiologists or your users used to using the product, the easier it becomes and the more time you save for the radiologist.” — Jeff Chang</p><p><br></p><p>“Because of the post-processing, because of the specific training on the radiologist’s historical reports, [our models] are much more accurate than the current state of the art [LLMs] for the applications that we currently provide.” — Jeff Chang</p><p><br></p><p><strong>Links:</strong></p><p><a href="https://www.linkedin.com/in/jechang/">Jeff Chang on LinkedIn</a></p><p><a href="https://www.radai.com">Rad AI</a></p><p><a href="https://www.linkedin.com/company/radai/">Rad AI on LinkedIn</a></p><p><strong>Resources for Computer Vision Teams:</strong></p><p><a href="https://www.linkedin.com/in/hdcouture/">LinkedIn</a> – Connect with Heather.</p><p><a href="https://pixelscientia.com/newsletter/">Computer Vision Insights Newsletter</a> – A biweekly newsletter to help bring the latest machine learning and computer vision research to applications in people and planetary health.</p><p><a href="http://pixelscientia.com/services/strategy-session/">Computer Vision Strategy Session</a> – Not sure how to advance your computer vision project? Get unstuck with a clear set of next steps. Schedule a 1 hour strategy session now to advance your project.</p><p><a href="https://pixelscientia.com/services/advisory/">Computer Vision Advisory Services</a> – Monthly advisory services to help you strategically plan your CV/ML capabilities, reduce the trial-and-error of model development, and get to market faster.</p>]]>
      </content:encoded>
      <pubDate>Mon, 16 Oct 2023 06:00:00 -0400</pubDate>
      <author>Heather D. Couture</author>
      <enclosure url="https://2.gum.fm/op3.dev/e/pdcn.co/e/pdst.fm/e/dts.podtrac.com/redirect.mp3/media.transistor.fm/731314e8/7ee6d4cc.mp3" length="24465519" type="audio/mpeg"/>
      <itunes:author>Heather D. Couture</itunes:author>
      <itunes:image href="https://img.transistor.fm/fcmzENQOAEwpu4_iy33LFDJNVXcpFbuUAreRLmH-_wk/rs:fill:0:0:1/w:1400/h:1400/q:60/mb:500000/aHR0cHM6Ly9pbWct/dXBsb2FkLXByb2R1/Y3Rpb24udHJhbnNp/c3Rvci5mbS9lcGlz/b2RlLzE1MzU0MDcv/MTY5NjUzNzQ3My1h/cnR3b3JrLmpwZw.jpg"/>
      <itunes:duration>1014</itunes:duration>
      <itunes:summary>
        <![CDATA[<p>Burnout in the medical field is a significant and pervasive problem that affects healthcare professionals across various specialties and levels of experience. In this episode, I explore the impact of artificial intelligence on the field of radiology. My guest, Jeff Chang, the Co-founder and Chief Product Officer of Rad AI, shares his insights into the transformative power of AI in addressing critical challenges in radiology (like burnout) and improving patient care.</p><p>Discover Rad AI's groundbreaking products, including Rad AI Omni Impressions and Continuity, driven by powerful machine learning, and learn how they seamlessly integrate into clinical workflows. Jeff also speaks about measuring the impact of their products, exciting future applications for their products, Rad AI's vision for the future of global diagnostic care, and much more! Don't miss this deep dive into AI's potential in revolutionizing radiology and improving patient lives worldwide with Jeff Chang from Rad AI!</p><p><br></p><p><strong>Key Points:</strong></p><ul><li>Background on Jeff and the overall mission of Rad AI.</li><li>Insight into the next-generation products that Rad AI offers.</li><li>How machine learning plays a central role in their products.</li><li>Training data and the use of transformer models in Rad AI's solutions.</li><li>Certain limitations and hurdles of working with radiology reports.</li><li>Ensuring product integration with existing clinical workflow.</li><li>Exciting future applications for AI and machine learning in the space.</li><li>Ways that Rad AI measures the impact of its technology.</li><li>Essential advice for leaders of AI-powered startups.</li><li>Upcoming products from Rad AI and the company’s future impact.</li></ul><p><br></p><p><strong>Quotes:</strong></p><p><br></p><p>“Every one of our products is centered around the latest machine learning transformative work.” — Jeff Chang</p><p><br></p><p>“There is absolutely zero change to the 
existing workflow. That makes it really easy for radiologists to adopt without having to change anything that they currently do.” — Jeff Chang</p><p><br></p><p>“The more you streamline both deployment and the training process, getting radiologists or your users used to using the product, the easier it becomes and the more time you save for the radiologist.” — Jeff Chang</p><p><br></p><p>“Because of the post-processing, because of the specific training on the radiologist’s historical reports, [our models] are much more accurate than the current state of the art [LLMs] for the applications that we currently provide.” — Jeff Chang</p><p><br></p><p><strong>Links:</strong></p><p><a href="https://www.linkedin.com/in/jechang/">Jeff Chang on LinkedIn</a></p><p><a href="https://www.radai.com">Rad AI</a></p><p><a href="https://www.linkedin.com/company/radai/">Rad AI on LinkedIn</a></p><p><strong>Resources for Computer Vision Teams:</strong></p><p><a href="https://www.linkedin.com/in/hdcouture/">LinkedIn</a> – Connect with Heather.</p><p><a href="https://pixelscientia.com/newsletter/">Computer Vision Insights Newsletter</a> – A biweekly newsletter to help bring the latest machine learning and computer vision research to applications in people and planetary health.</p><p><a href="http://pixelscientia.com/services/strategy-session/">Computer Vision Strategy Session</a> – Not sure how to advance your computer vision project? Get unstuck with a clear set of next steps. Schedule a 1 hour strategy session now to advance your project.</p><p><a href="https://pixelscientia.com/services/advisory/">Computer Vision Advisory Services</a> – Monthly advisory services to help you strategically plan your CV/ML capabilities, reduce the trial-and-error of model development, and get to market faster.</p>]]>
      </itunes:summary>
      <itunes:keywords>machine learning, deep learning, radiology, healthcare</itunes:keywords>
      <itunes:explicit>No</itunes:explicit>
      <podcast:transcript url="https://share.transistor.fm/s/731314e8/transcript.txt" type="text/plain"/>
    </item>
    <item>
      <title>Smarter Vegetation Management with Indra den Bakker from Overstory</title>
      <itunes:episode>53</itunes:episode>
      <podcast:episode>53</podcast:episode>
      <itunes:title>Smarter Vegetation Management with Indra den Bakker from Overstory</itunes:title>
      <itunes:episodeType>full</itunes:episodeType>
      <guid isPermaLink="false">f1ea755a-678b-4ac6-9dcc-56038f9cfb2f</guid>
      <link>https://pixelscientia.com/podcast/smarter-vegetation-management-with-indra-den-bakker-from-overstory/</link>
      <description>
        <![CDATA[<p>Monitoring and analyzing vegetation near power lines is an essential part of ensuring the safety, reliability, and environmental sustainability of electrical grids. It helps utilities identify and address potential issues before they become emergencies, benefiting both the utility companies and the communities they serve.</p><p>I sat down with Indra den Bakker, CEO and Co-founder of Overstory, to discuss how they are revolutionizing the sector using machine learning (ML) and satellite imagery to improve infrastructure management, specifically in the context of vegetation. Tuning in, you’ll learn about the challenges that come with satellite imagery, such as varying resolutions, lighting conditions, and geographic differences, and how Overstory navigates these hurdles to provide accurate and actionable insights. Indra delves into the iterative development process at Overstory, offering advice for AI startups and highlighting the importance of staying focused and prioritizing real-world problem-solving. 
To discover how Overstory uses leading indicators to track its success, hear Indra's vision for the future, and gain a deeper understanding of the transformative power of AI in addressing real-world challenges and shaping a more sustainable future, don’t miss this episode!</p><p><br></p><p><strong>Key Points:</strong></p><ul><li>Indra’s background in computational intelligence and his decision to create Overstory.</li><li>Overstory’s overall mission and purpose (and why it is important).</li><li>How machine learning is the backbone of Overstory's technology.</li><li>The challenges associated with using satellite imagery.</li><li>Insight into the iterative development process at Overstory.</li><li>Balancing deadlines with the need for research and development.</li><li>Attracting talent by leveraging the company’s mission.</li><li>Overstory’s approach to training new hires.</li><li>Measuring Overstory’s impact and what leading indicators are used.</li><li>Advice for AI startups: stay focused on solving real-world problems.</li><li>Breaking down the future outlook for Overstory with Indra.</li></ul><p><br></p><p><strong>Quotes:</strong></p><p>“Machine learning is really the backbone of our technology. 
It helps us to analyze vast amounts of data [and] satellite imagery.” — Indra den Bakker</p><p><br></p><p>“We can create a beautiful, very accurate map of all the species in the world but if that’s not actionable to our customers then there’s no use to it.” — Indra den Bakker</p><p><br></p><p>“R&amp;D is part of our DNA and what we deliver to our customers and that will always be a large part of, at least from an engineering [standpoint], a large part of the work [we do].” — Indra den Bakker</p><p><br></p><p>“Stay focused on solving real-world problems.” — Indra den Bakker</p><p><br></p><p><strong>Links:</strong></p><p><a href="https://www.linkedin.com/in/indradenbakker">Indra den Bakker on LinkedIn</a></p><p><a href="https://twitter.com/indradenbakker">Indra den Bakker on X</a></p><p><a href="mailto:indra@overstory.com">Indra den Bakker Email</a></p><p><a href="https://www.overstory.com">Overstory</a></p><p><strong>Resources for Computer Vision Teams:</strong></p><p><a href="https://www.linkedin.com/in/hdcouture/">LinkedIn</a> – Connect with Heather.</p><p><a href="https://pixelscientia.com/newsletter/">Computer Vision Insights Newsletter</a> – A biweekly newsletter to help bring the latest machine learning and computer vision research to applications in people and planetary health.</p><p><a href="http://pixelscientia.com/services/strategy-session/">Computer Vision Strategy Session</a> – Not sure how to advance your computer vision project? Get unstuck with a clear set of next steps. Schedule a 1 hour strategy session now to advance your project.</p><p><a href="https://pixelscientia.com/services/advisory/">Computer Vision Advisory Services</a> – Monthly advisory services to help you strategically plan your CV/ML capabilities, reduce the trial-and-error of model development, and get to market faster.</p>]]>
      </description>
      <content:encoded>
        <![CDATA[<p>Monitoring and analyzing vegetation near power lines is an essential part of ensuring the safety, reliability, and environmental sustainability of electrical grids. It helps utilities identify and address potential issues before they become emergencies, benefiting both the utility companies and the communities they serve.</p><p>I sat down with Indra den Bakker, CEO and Co-founder of Overstory, to discuss how they are revolutionizing the sector using machine learning (ML) and satellite imagery to improve infrastructure management, specifically in the context of vegetation. Tuning in, you’ll learn about the challenges that come with satellite imagery, such as varying resolutions, lighting conditions, and geographic differences, and how Overstory navigates these hurdles to provide accurate and actionable insights. Indra delves into the iterative development process at Overstory, offering advice for AI startups and highlighting the importance of staying focused and prioritizing real-world problem-solving. 
To discover how Overstory uses leading indicators to track its success, hear Indra's vision for the future, and gain a deeper understanding of the transformative power of AI in addressing real-world challenges and shaping a more sustainable future, don’t miss this episode!</p><p><br></p><p><strong>Key Points:</strong></p><ul><li>Indra’s background in computational intelligence and his decision to create Overstory.</li><li>Overstory’s overall mission and purpose (and why it is important).</li><li>How machine learning is the backbone of Overstory's technology.</li><li>The challenges associated with using satellite imagery.</li><li>Insight into the iterative development process at Overstory.</li><li>Balancing deadlines with the need for research and development.</li><li>Attracting talent by leveraging the company’s mission.</li><li>Overstory’s approach to training new hires.</li><li>Measuring Overstory’s impact and what leading indicators are used.</li><li>Advice for AI startups: stay focused on solving real-world problems.</li><li>Breaking down the future outlook for Overstory with Indra.</li></ul><p><br></p><p><strong>Quotes:</strong></p><p>“Machine learning is really the backbone of our technology. 
It helps us to analyze vast amounts of data [and] satellite imagery.” — Indra den Bakker</p><p><br></p><p>“We can create a beautiful, very accurate map of all the species in the world but if that’s not actionable to our customers then there’s no use to it.” — Indra den Bakker</p><p><br></p><p>“R&amp;D is part of our DNA and what we deliver to our customers and that will always be a large part of, at least from an engineering [standpoint], a large part of the work [we do].” — Indra den Bakker</p><p><br></p><p>“Stay focused on solving real-world problems.” — Indra den Bakker</p><p><br></p><p><strong>Links:</strong></p><p><a href="https://www.linkedin.com/in/indradenbakker">Indra den Bakker on LinkedIn</a></p><p><a href="https://twitter.com/indradenbakker">Indra den Bakker on X</a></p><p><a href="mailto:indra@overstory.com">Indra den Bakker Email</a></p><p><a href="https://www.overstory.com">Overstory</a></p><p><strong>Resources for Computer Vision Teams:</strong></p><p><a href="https://www.linkedin.com/in/hdcouture/">LinkedIn</a> – Connect with Heather.</p><p><a href="https://pixelscientia.com/newsletter/">Computer Vision Insights Newsletter</a> – A biweekly newsletter to help bring the latest machine learning and computer vision research to applications in people and planetary health.</p><p><a href="http://pixelscientia.com/services/strategy-session/">Computer Vision Strategy Session</a> – Not sure how to advance your computer vision project? Get unstuck with a clear set of next steps. Schedule a 1 hour strategy session now to advance your project.</p><p><a href="https://pixelscientia.com/services/advisory/">Computer Vision Advisory Services</a> – Monthly advisory services to help you strategically plan your CV/ML capabilities, reduce the trial-and-error of model development, and get to market faster.</p>]]>
      </content:encoded>
      <pubDate>Mon, 09 Oct 2023 06:00:00 -0400</pubDate>
      <author>Heather D. Couture</author>
      <enclosure url="https://2.gum.fm/op3.dev/e/pdcn.co/e/pdst.fm/e/dts.podtrac.com/redirect.mp3/media.transistor.fm/b0bcbb72/98086ed6.mp3" length="27718150" type="audio/mpeg"/>
      <itunes:author>Heather D. Couture</itunes:author>
      <itunes:image href="https://img.transistor.fm/TihHq3O8InHukMmGInsc4YISl37V80qOQfP1IRM0PUI/rs:fill:0:0:1/w:1400/h:1400/q:60/mb:500000/aHR0cHM6Ly9pbWct/dXBsb2FkLXByb2R1/Y3Rpb24udHJhbnNp/c3Rvci5mbS9lcGlz/b2RlLzE1MzAyOTIv/MTY5NjI2Njg5My1h/cnR3b3JrLmpwZw.jpg"/>
      <itunes:duration>1147</itunes:duration>
      <itunes:summary>
        <![CDATA[<p>Monitoring and analyzing vegetation near power lines is an essential part of ensuring the safety, reliability, and environmental sustainability of electrical grids. It helps utilities identify and address potential issues before they become emergencies, benefiting both the utility companies and the communities they serve.</p><p>I sat down with Indra den Bakker, CEO and Co-founder of Overstory, to discuss how they are revolutionizing the sector using machine learning (ML) and satellite imagery to improve infrastructure management, specifically in the context of vegetation. Tuning in, you’ll learn about the challenges that come with satellite imagery, such as varying resolutions, lighting conditions, and geographic differences, and how Overstory navigates these hurdles to provide accurate and actionable insights. Indra delves into the iterative development process at Overstory, offering advice for AI startups and highlighting the importance of staying focused and prioritizing real-world problem-solving. 
To discover how Overstory uses leading indicators to track its success, hear Indra's vision for the future, and gain a deeper understanding of the transformative power of AI in addressing real-world challenges and shaping a more sustainable future, don’t miss this episode!</p><p><br></p><p><strong>Key Points:</strong></p><ul><li>Indra’s background in computational intelligence and his decision to create Overstory.</li><li>Overstory’s overall mission and purpose (and why it is important).</li><li>How machine learning is the backbone of Overstory's technology.</li><li>The challenges associated with using satellite imagery.</li><li>Insight into the iterative development process at Overstory.</li><li>Balancing deadlines with the need for research and development.</li><li>Attracting talent by leveraging the company’s mission.</li><li>Overstory’s approach to training new hires.</li><li>Measuring Overstory’s impact and what leading indicators are used.</li><li>Advice for AI startups: stay focused on solving real-world problems.</li><li>Breaking down the future outlook for Overstory with Indra.</li></ul><p><br></p><p><strong>Quotes:</strong></p><p>“Machine learning is really the backbone of our technology. 
It helps us to analyze vast amounts of data [and] satellite imagery.” — Indra den Bakker</p><p><br></p><p>“We can create a beautiful, very accurate map of all the species in the world but if that’s not actionable to our customers then there’s no use to it.” — Indra den Bakker</p><p><br></p><p>“R&amp;D is part of our DNA and what we deliver to our customers and that will always be a large part of, at least from an engineering [standpoint], a large part of the work [we do].” — Indra den Bakker</p><p><br></p><p>“Stay focused on solving real-world problems.” — Indra den Bakker</p><p><br></p><p><strong>Links:</strong></p><p><a href="https://www.linkedin.com/in/indradenbakker">Indra den Bakker on LinkedIn</a></p><p><a href="https://twitter.com/indradenbakker">Indra den Bakker on X</a></p><p><a href="mailto:indra@overstory.com">Indra den Bakker Email</a></p><p><a href="https://www.overstory.com">Overstory</a></p><p><strong>Resources for Computer Vision Teams:</strong></p><p><a href="https://www.linkedin.com/in/hdcouture/">LinkedIn</a> – Connect with Heather.</p><p><a href="https://pixelscientia.com/newsletter/">Computer Vision Insights Newsletter</a> – A biweekly newsletter to help bring the latest machine learning and computer vision research to applications in people and planetary health.</p><p><a href="http://pixelscientia.com/services/strategy-session/">Computer Vision Strategy Session</a> – Not sure how to advance your computer vision project? Get unstuck with a clear set of next steps. Schedule a 1 hour strategy session now to advance your project.</p><p><a href="https://pixelscientia.com/services/advisory/">Computer Vision Advisory Services</a> – Monthly advisory services to help you strategically plan your CV/ML capabilities, reduce the trial-and-error of model development, and get to market faster.</p>]]>
      </itunes:summary>
      <itunes:keywords>machine learning, deep learning, satellite, infrastructure management</itunes:keywords>
      <itunes:explicit>No</itunes:explicit>
      <podcast:transcript url="https://share.transistor.fm/s/b0bcbb72/transcript.txt" type="text/plain"/>
    </item>
    <item>
      <title>Measuring Neurological Conditions with Dirk Smeets from Icometrix</title>
      <itunes:episode>52</itunes:episode>
      <podcast:episode>52</podcast:episode>
      <itunes:title>Measuring Neurological Conditions with Dirk Smeets from Icometrix</itunes:title>
      <itunes:episodeType>full</itunes:episodeType>
      <guid isPermaLink="false">ce093a94-8d69-4d7b-b2cb-98333f41b13c</guid>
      <link>https://pixelscientia.com/podcast/measuring-neurological-conditions-with-dirk-smeets-from-icometrix/</link>
      <description>
        <![CDATA[<p>Can machine learning improve the treatment of neurological diseases? Here to tell us how AI is improving the landscape of neurological care is Dirk Smeets, Chief Technology Officer of icometrix.</p><p>We kick off our conversation considering what the landscape of treatment looks like today before exploring the role of AI in matching treatment to technologies. We discuss the parallels between the outcome of ChatGPT and the implications of neurological imaging, and Dirk reveals how icometrix has been able to produce artificially intelligent machines that can carry out expert tasks. Imagining the future, we discuss different approaches to adapting 2D imaging and the advantages of taking a deep-learning approach. This episode covers the process of choosing focus areas, weighing different feature requests, the influence of the regulatory process, and Dirk’s predictions for the future of neurological treatment. Join me today to hear all this and so much more. </p><p><br></p><p><strong>Key Points:</strong></p><ul><li>Introducing Dirk Smeets, Chief Technology Officer of icometrix. 
</li><li>The work of icometrix in treating neurological disorders.</li><li>What the landscape of neurological disease treatment looks like today.</li><li>The role of AI in matching treatment to technologies.</li><li>Parallels between the outcome of ChatGPT and neurological imaging.</li><li>Introducing artificially intelligent machines that can carry out tasks to the level of experts.</li><li>How expert-level AI care will change the way care is carried out in the coming years.</li><li>How icometrix uses machine learning in the analysis of imaging data.</li><li>Different approaches to adapting the standard process of 2D imaging through deep learning.</li><li>Advantages to this deep-learning approach.</li><li>Obstacles to developing diagnosis capabilities through machine learning.</li><li>Using clinical workflow to determine which areas to focus on with new innovations.</li><li>Choosing to embed automation into the AI process to empower practitioners.</li><li>Weighing different feature requests when choosing which to develop.</li><li>How the regulatory process impacts machine learning developments.</li><li>Building trust by publishing work for the public eye.</li><li>Dirk’s recommendation for other founders in AI technology. </li><li>Thinking of AI as the means to achieve a goal rather than the purpose. </li><li>His prediction for the future of treatment for neurological conditions and the role icometrix will play. </li></ul><p><br></p><p><strong>Quotes:</strong></p><p>“One in three people will suffer in their life from a neurological condition. The societal burden for neurological conditions is the sum of kidney disease, heart disease and diabetes together.” — Dirk Smeets</p><p><br></p><p>“The field of neurological conditions is moving. There are treatments available, but the downside unfortunately, is that those medications are not working for everyone. 
It is still a lot of trial-and-error.” — Dirk Smeets</p><p><br></p><p>“We can build machine learning models that can do tasks at the level of experts. For example, expert radiologists. That will change the way we do current practice.” — Dirk Smeets</p><p><br></p><p>“At icometrix we find science important. It's actually almost in our DNA. The reason why is that we believe that the technology we build should be scientifically sound.” — Dirk Smeets</p><p><br></p><p><strong>Links:</strong></p><p><a href="https://www.linkedin.com/in/dirk-smeets-03a3364/">Dirk Smeets on LinkedIn</a></p><p><a href="https://twitter.com/dsmeets">Dirk Smeets on Twitter</a></p><p><a href="https://icometrix.com/">icometrix</a></p><p><a href="https://www.linkedin.com/company/icometrix/">icometrix on LinkedIn</a></p><p><a href="https://twitter.com/icometrix">icometrix on Twitter</a></p><p><strong>Resources for Computer Vision Teams:</strong></p><p><a href="https://www.linkedin.com/in/hdcouture/">LinkedIn</a> – Connect with Heather.</p><p><a href="https://pixelscientia.com/newsletter/">Computer Vision Insights Newsletter</a> – A biweekly newsletter to help bring the latest machine learning and computer vision research to applications in people and planetary health.</p><p><a href="http://pixelscientia.com/services/strategy-session/">Computer Vision Strategy Session</a> – Not sure how to advance your computer vision project? Get unstuck with a clear set of next steps. Schedule a 1 hour strategy session now to advance your project.</p><p><a href="https://pixelscientia.com/services/advisory/">Computer Vision Advisory Services</a> – Monthly advisory services to help you strategically plan your CV/ML capabilities, reduce the trial-and-error of model development, and get to market faster.</p>]]>
      </description>
      <content:encoded>
        <![CDATA[<p>Can machine learning improve the treatment of neurological diseases? Here to tell us how AI is improving the landscape of neurological care is Dirk Smeets, Chief Technology Officer of icometrix.</p><p>We kick off our conversation considering what the landscape of treatment looks like today before exploring the role of AI in matching treatment to technologies. We discuss the parallels between the outcome of ChatGPT and the implications of neurological imaging, and Dirk reveals how icometrix has been able to produce artificially intelligent machines that can carry out expert tasks. Imagining the future, we discuss different approaches to adapting 2D imaging and the advantages of taking a deep-learning approach. This episode covers the process of choosing focus areas, weighing different feature requests, the influence of the regulatory process, and Dirk’s predictions for the future of neurological treatment. Join me today to hear all this and so much more. </p><p><br></p><p><strong>Key Points:</strong></p><ul><li>Introducing Dirk Smeets, Chief Technology Officer of icometrix. 
</li><li>The work of icometrix in treating neurological disorders.</li><li>What the landscape of neurological disease treatment looks like today.</li><li>The role of AI in matching treatment to technologies.</li><li>Parallels between the outcome of ChatGPT and neurological imaging.</li><li>Introducing artificially intelligent machines that can carry out tasks to the level of experts.</li><li>How expert-level AI care will change the way care is carried out in the coming years.</li><li>How icometrix uses machine learning in the analysis of imaging data.</li><li>Different approaches to adapting the standard process of 2D imaging through deep learning.</li><li>Advantages to this deep-learning approach.</li><li>Obstacles to developing diagnosis capabilities through machine learning.</li><li>Using clinical workflow to determine which areas to focus on with new innovations.</li><li>Choosing to embed automation into the AI process to empower practitioners.</li><li>Weighing different feature requests when choosing which to develop.</li><li>How the regulatory process impacts machine learning developments.</li><li>Building trust by publishing work for the public eye.</li><li>Dirk’s recommendation for other founders in AI technology. </li><li>Thinking of AI as the means to achieve a goal rather than the purpose. </li><li>His prediction for the future of treatment for neurological conditions and the role icometrix will play. </li></ul><p><br></p><p><strong>Quotes:</strong></p><p>“One in three people will suffer in their life from a neurological condition. The societal burden for neurological conditions is the sum of kidney disease, heart disease and diabetes together.” — Dirk Smeets</p><p><br></p><p>“The field of neurological conditions is moving. There are treatments available, but the downside unfortunately, is that those medications are not working for everyone. 
It is still a lot of trial-and-error.” — Dirk Smeets</p><p><br></p><p>“We can build machine learning models that can do tasks at the level of experts. For example, expert radiologists. That will change the way we do current practice.” — Dirk Smeets</p><p><br></p><p>“At icometrix we find science important. It's actually almost in our DNA. The reason why is that we believe that the technology we build should be scientifically sound.” — Dirk Smeets</p><p><br></p><p><strong>Links:</strong></p><p><a href="https://www.linkedin.com/in/dirk-smeets-03a3364/">Dirk Smeets on LinkedIn</a></p><p><a href="https://twitter.com/dsmeets">Dirk Smeets on Twitter</a></p><p><a href="https://icometrix.com/">icometrix</a></p><p><a href="https://www.linkedin.com/company/icometrix/">icometrix on LinkedIn</a></p><p><a href="https://twitter.com/icometrix">icometrix on Twitter</a></p><p><strong>Resources for Computer Vision Teams:</strong></p><p><a href="https://www.linkedin.com/in/hdcouture/">LinkedIn</a> – Connect with Heather.</p><p><a href="https://pixelscientia.com/newsletter/">Computer Vision Insights Newsletter</a> – A biweekly newsletter to help bring the latest machine learning and computer vision research to applications in people and planetary health.</p><p><a href="http://pixelscientia.com/services/strategy-session/">Computer Vision Strategy Session</a> – Not sure how to advance your computer vision project? Get unstuck with a clear set of next steps. Schedule a 1 hour strategy session now to advance your project.</p><p><a href="https://pixelscientia.com/services/advisory/">Computer Vision Advisory Services</a> – Monthly advisory services to help you strategically plan your CV/ML capabilities, reduce the trial-and-error of model development, and get to market faster.</p>]]>
      </content:encoded>
      <pubDate>Mon, 02 Oct 2023 06:00:00 -0400</pubDate>
      <author>Heather D. Couture</author>
      <enclosure url="https://2.gum.fm/op3.dev/e/pdcn.co/e/pdst.fm/e/dts.podtrac.com/redirect.mp3/media.transistor.fm/44d3c101/2f6321ab.mp3" length="32863303" type="audio/mpeg"/>
      <itunes:author>Heather D. Couture</itunes:author>
      <itunes:image href="https://img.transistor.fm/2ttNXLJ6CRpScj6ZVMW4Lg9GMOYSjit-217da1tMdgI/rs:fill:0:0:1/w:1400/h:1400/q:60/mb:500000/aHR0cHM6Ly9pbWct/dXBsb2FkLXByb2R1/Y3Rpb24udHJhbnNp/c3Rvci5mbS9lcGlz/b2RlLzE0NjQ1Mzgv/MTY5MjMwMDE2Ni1h/cnR3b3JrLmpwZw.jpg"/>
      <itunes:duration>1365</itunes:duration>
      <itunes:summary>
        <![CDATA[<p>Can machine learning improve the treatment of neurological diseases? Here to tell us how AI is improving the landscape of neurological care is Dirk Smeets, Chief Technology Officer of icometrix.</p><p>We kick off our conversation considering what the landscape of treatment looks like today before exploring the role of AI in matching treatment to technologies. We discuss the parallels between the outcome of ChatGPT and the implications of neurological imaging, and Dirk reveals how icometrix has been able to produce artificially intelligent machines that can carry out expert tasks. Imagining the future, we discuss different approaches to adapting 2D imaging and the advantages of taking a deep-learning approach. This episode covers the process of choosing focus areas, weighing different feature requests, the influence of the regulatory process, and Dirk’s predictions for the future of neurological treatment. Join me today to hear all this and so much more. </p><p><br></p><p><strong>Key Points:</strong></p><ul><li>Introducing Dirk Smeets, Chief Technology Officer of icometrix. 
</li><li>The work of icometrix in treating neurological disorders.</li><li>What the landscape of neurological disease treatment looks like today.</li><li>The role of AI in matching treatment to technologies.</li><li>Parallels between the outcome of ChatGPT and neurological imaging.</li><li>Introducing artificially intelligent machines that can carry out tasks to the level of experts.</li><li>How expert-level AI care will change the way care is carried out in the coming years.</li><li>How icometrix uses machine learning in the analysis of imaging data.</li><li>Different approaches to adapting the standard process of 2D imaging through deep learning.</li><li>Advantages to this deep-learning approach.</li><li>Obstacles to developing diagnosis capabilities through machine learning.</li><li>Using clinical workflow to determine which areas to focus on with new innovations.</li><li>Choosing to embed automation into the AI process to empower practitioners.</li><li>Weighing different feature requests when choosing which to develop.</li><li>How the regulatory process impacts machine learning developments.</li><li>Building trust by publishing work for the public eye.</li><li>Dirk’s recommendation for other founders in AI technology. </li><li>Thinking of AI as the means to achieve a goal rather than the purpose. </li><li>His prediction for the future of treatment for neurological conditions and the role icometrix will play. </li></ul><p><br></p><p><strong>Quotes:</strong></p><p>“One in three people will suffer in their life from a neurological condition. The societal burden for neurological conditions is the sum of kidney disease, heart disease and diabetes together.” — Dirk Smeets</p><p><br></p><p>“The field of neurological conditions is moving. There are treatments available, but the downside unfortunately, is that those medications are not working for everyone. 
It is still a lot of trial-and-error.” — Dirk Smeets</p><p><br></p><p>“We can build machine learning models that can do tasks at the level of experts. For example, expert radiologists. That will change the way we do current practice.” — Dirk Smeets</p><p><br></p><p>“At icometrix we find science important. It's actually almost in our DNA. The reason why is that we believe that the technology we build should be scientifically sound.” — Dirk Smeets</p><p><br></p><p><strong>Links:</strong></p><p><a href="https://www.linkedin.com/in/dirk-smeets-03a3364/">Dirk Smeets on LinkedIn</a></p><p><a href="https://twitter.com/dsmeets">Dirk Smeets on Twitter</a></p><p><a href="https://icometrix.com/">icometrix</a></p><p><a href="https://www.linkedin.com/company/icometrix/">icometrix on LinkedIn</a></p><p><a href="https://twitter.com/icometrix">icometrix on Twitter</a></p><p><strong>Resources for Computer Vision Teams:</strong></p><p><a href="https://www.linkedin.com/in/hdcouture/">LinkedIn</a> – Connect with Heather.</p><p><a href="https://pixelscientia.com/newsletter/">Computer Vision Insights Newsletter</a> – A biweekly newsletter to help bring the latest machine learning and computer vision research to applications in people and planetary health.</p><p><a href="http://pixelscientia.com/services/strategy-session/">Computer Vision Strategy Session</a> – Not sure how to advance your computer vision project? Get unstuck with a clear set of next steps. Schedule a 1 hour strategy session now to advance your project.</p><p><a href="https://pixelscientia.com/services/advisory/">Computer Vision Advisory Services</a> – Monthly advisory services to help you strategically plan your CV/ML capabilities, reduce the trial-and-error of model development, and get to market faster.</p>]]>
      </itunes:summary>
      <itunes:keywords>machine learning, computer vision, medical imaging, neurological</itunes:keywords>
      <itunes:explicit>No</itunes:explicit>
      <podcast:transcript url="https://share.transistor.fm/s/44d3c101/transcript.txt" type="text/plain"/>
    </item>
    <item>
      <title>Optimizing Orchard Yields with Benji Meltzer from Aerobotics</title>
      <itunes:episode>51</itunes:episode>
      <podcast:episode>51</podcast:episode>
      <itunes:title>Optimizing Orchard Yields with Benji Meltzer from Aerobotics</itunes:title>
      <itunes:episodeType>full</itunes:episodeType>
      <guid isPermaLink="false">117fd810-31b2-4fb0-9bda-936881f9640c</guid>
      <link>https://pixelscientia.com/podcast/optimizing-orchard-yields-with-benji-meltzer-from-aerobotics/</link>
      <description>
        <![CDATA[<p>As the agricultural industry expands to meet increased population growth and food demand, food security becomes a matter of global importance, which is why today’s guest is using AI to help farmers optimize the health of their farms.</p><p>Benji Meltzer is the Co-founder and CTO of Aerobotics, a South African Ag-Tech startup focused on providing crop protection to farmers through early problem detection and alerts. Combining satellite data, drone imagery, and scout information, Aerobotics tracks farm performance on a tree-by-tree basis and uses machine learning (ML) to identify early-stage problems, automatically detect pests and diseases, guide farmers to these locations, and suggest solutions.</p><p>In this episode, Benji offers some deeper insight into what Aerobotics does and how they can help farmers optimize the yield of their orchards. We also discuss how they use ML to process vast amounts of complex data, the challenges they encounter in the field, and Benji’s advice for other AI startups who hope to solve real-world problems, plus so much more. 
For a fascinating conversation about the applications of AI in agriculture, tune in today!</p><p><br></p><p><strong>Key Points:</strong></p><ul><li>An overview of Benji’s interests, formal education, and what led him to co-found Aerobotics.</li><li>What Aerobotics does and how it contributes to more sustainable agriculture.</li><li>The role that ML plays in Aerobotics’ technology.</li><li>How they gather and annotate the data needed to train different models.</li><li>Challenges they have encountered, from connectivity issues to weather conditions.</li><li>Ensuring that Aerobotics’ models can generalize to many different variations.</li><li>Why there is no one-size-fits-all approach to developing these models.</li><li>Steps to planning and developing new ML products or features.</li><li>How the seasonal nature of agriculture impacts Aerobotics’ ML development.</li><li>Benji’s advice for leaders of AI-powered startups: keep it simple!</li><li>What the future holds for Aerobotics and how they hope to expand within their niche.</li></ul><p><br></p><p><strong>Quotes:</strong></p><p>“We're using the performance of the crop to inform how we farm and becoming more responsive and reactive rather than farming completely preventatively.” — Benji Meltzer</p><p><br></p><p>“The role that Aerobotics is playing is building that layer of insight and understanding into how the crop is performing to enable people to make these decisions.” — Benji Meltzer</p><p><br></p><p>“At its core – this product wouldn't exist without machine learning.” — Benji Meltzer</p><p><br></p><p>“Where AI can add the most value is in using technology to reduce that complexity and to downsample and simplify information into patterns and decisions that people can actually consume. 
It's almost too easy to compound that complexity and not actually solve the underlying problems.” — Benji Meltzer</p><p><br></p><p><strong>Links:</strong></p><p><a href="https://www.aerobotics.com/">Aerobotics</a></p><p><a href="https://www.linkedin.com/in/benjamin-meltzer/">Benji Meltzer on LinkedIn</a></p><p><a href="https://twitter.com/benjimeltzer">Benji Meltzer on X</a></p><p><strong>Resources for Computer Vision Teams:</strong></p><p><a href="https://www.linkedin.com/in/hdcouture/">LinkedIn</a> – Connect with Heather.</p><p><a href="https://pixelscientia.com/newsletter/">Computer Vision Insights Newsletter</a> – A biweekly newsletter to help bring the latest machine learning and computer vision research to applications in people and planetary health.</p><p><a href="http://pixelscientia.com/services/strategy-session/">Computer Vision Strategy Session</a> – Not sure how to advance your computer vision project? Get unstuck with a clear set of next steps. Schedule a 1 hour strategy session now to advance your project.</p><p><a href="https://pixelscientia.com/services/advisory/">Computer Vision Advisory Services</a> – Monthly advisory services to help you strategically plan your CV/ML capabilities, reduce the trial-and-error of model development, and get to market faster.</p>]]>
      </description>
      <content:encoded>
        <![CDATA[<p>As the agricultural industry expands to meet increased population growth and food demand, food security becomes a matter of global importance, which is why today’s guest is using AI to help farmers optimize the health of their farms.</p><p>Benji Meltzer is the Co-founder and CTO of Aerobotics, a South African Ag-Tech startup focused on providing crop protection to farmers through early problem detection and alerts. Combining satellite data, drone imagery, and scout information, Aerobotics tracks farm performance on a tree-by-tree basis and uses machine learning (ML) to identify early-stage problems, automatically detect pests and diseases, guide farmers to these locations, and suggest solutions.</p><p>In this episode, Benji offers some deeper insight into what Aerobotics does and how they can help farmers optimize the yield of their orchards. We also discuss how they use ML to process vast amounts of complex data, the challenges they encounter in the field, and Benji’s advice for other AI startups who hope to solve real-world problems, plus so much more. 
For a fascinating conversation about the applications of AI in agriculture, tune in today!</p><p><br></p><p><strong>Key Points:</strong></p><ul><li>An overview of Benji’s interests, formal education, and what led him to co-found Aerobotics.</li><li>What Aerobotics does and how it contributes to more sustainable agriculture.</li><li>The role that ML plays in Aerobotics’ technology.</li><li>How they gather and annotate the data needed to train different models.</li><li>Challenges they have encountered, from connectivity issues to weather conditions.</li><li>Ensuring that Aerobotics’ models can generalize to many different variations.</li><li>Why there is no one-size-fits-all approach to developing these models.</li><li>Steps to planning and developing new ML products or features.</li><li>How the seasonal nature of agriculture impacts Aerobotics’ ML development.</li><li>Benji’s advice for leaders of AI-powered startups: keep it simple!</li><li>What the future holds for Aerobotics and how they hope to expand within their niche.</li></ul><p><br></p><p><strong>Quotes:</strong></p><p>“We're using the performance of the crop to inform how we farm and becoming more responsive and reactive rather than farming completely preventatively.” — Benji Meltzer</p><p><br></p><p>“The role that Aerobotics is playing is building that layer of insight and understanding into how the crop is performing to enable people to make these decisions.” — Benji Meltzer</p><p><br></p><p>“At its core – this product wouldn't exist without machine learning.” — Benji Meltzer</p><p><br></p><p>“Where AI can add the most value is in using technology to reduce that complexity and to downsample and simplify information into patterns and decisions that people can actually consume. 
It's almost too easy to compound that complexity and not actually solve the underlying problems.” — Benji Meltzer</p><p><br></p><p><strong>Links:</strong></p><p><a href="https://www.aerobotics.com/">Aerobotics</a></p><p><a href="https://www.linkedin.com/in/benjamin-meltzer/">Benji Meltzer on LinkedIn</a></p><p><a href="https://twitter.com/benjimeltzer">Benji Meltzer on X</a></p><p><strong>Resources for Computer Vision Teams:</strong></p><p><a href="https://www.linkedin.com/in/hdcouture/">LinkedIn</a> – Connect with Heather.</p><p><a href="https://pixelscientia.com/newsletter/">Computer Vision Insights Newsletter</a> – A biweekly newsletter to help bring the latest machine learning and computer vision research to applications in people and planetary health.</p><p><a href="http://pixelscientia.com/services/strategy-session/">Computer Vision Strategy Session</a> – Not sure how to advance your computer vision project? Get unstuck with a clear set of next steps. Schedule a 1 hour strategy session now to advance your project.</p><p><a href="https://pixelscientia.com/services/advisory/">Computer Vision Advisory Services</a> – Monthly advisory services to help you strategically plan your CV/ML capabilities, reduce the trial-and-error of model development, and get to market faster.</p>]]>
      </content:encoded>
      <pubDate>Mon, 25 Sep 2023 06:00:00 -0400</pubDate>
      <author>Heather D. Couture</author>
      <enclosure url="https://2.gum.fm/op3.dev/e/pdcn.co/e/pdst.fm/e/dts.podtrac.com/redirect.mp3/media.transistor.fm/dd16480a/84fbcf87.mp3" length="28642652" type="audio/mpeg"/>
      <itunes:author>Heather D. Couture</itunes:author>
      <itunes:image href="https://img.transistor.fm/TVF5djiOLAAarW_WndADtA1WdUAbeydQtal4tvU_CmE/rs:fill:0:0:1/w:1400/h:1400/q:60/mb:500000/aHR0cHM6Ly9pbWct/dXBsb2FkLXByb2R1/Y3Rpb24udHJhbnNp/c3Rvci5mbS9lcGlz/b2RlLzE0ODkwNTIv/MTY5Mzg1MzY2MC1h/cnR3b3JrLmpwZw.jpg"/>
      <itunes:duration>1781</itunes:duration>
      <itunes:summary>
        <![CDATA[<p>As the agricultural industry expands to meet increased population growth and food demand, food security becomes a matter of global importance, which is why today’s guest is using AI to help farmers optimize the health of their farms.</p><p>Benji Meltzer is the Co-founder and CTO of Aerobotics, a South African Ag-Tech startup focused on providing crop protection to farmers through early problem detection and alerts. Combining satellite data, drone imagery, and scout information, Aerobotics tracks farm performance on a tree-by-tree basis and uses machine learning (ML) to identify early-stage problems, automatically detect pests and diseases, guide farmers to these locations, and suggest solutions.</p><p>In this episode, Benji offers some deeper insight into what Aerobotics does and how they can help farmers optimize the yield of their orchards. We also discuss how they use ML to process vast amounts of complex data, the challenges they encounter in the field, and Benji’s advice for other AI startups who hope to solve real-world problems, plus so much more. 
For a fascinating conversation about the applications of AI in agriculture, tune in today!</p><p><br></p><p><strong>Key Points:</strong></p><ul><li>An overview of Benji’s interests, formal education, and what led him to co-found Aerobotics.</li><li>What Aerobotics does and how it contributes to more sustainable agriculture.</li><li>The role that ML plays in Aerobotics’ technology.</li><li>How they gather and annotate the data needed to train different models.</li><li>Challenges they have encountered, from connectivity issues to weather conditions.</li><li>Ensuring that Aerobotics’ models can generalize to many different variations.</li><li>Why there is no one-size-fits-all approach to developing these models.</li><li>Steps to planning and developing new ML products or features.</li><li>How the seasonal nature of agriculture impacts Aerobotics’ ML development.</li><li>Benji’s advice for leaders of AI-powered startups: keep it simple!</li><li>What the future holds for Aerobotics and how they hope to expand within their niche.</li></ul><p><br></p><p><strong>Quotes:</strong></p><p>“We're using the performance of the crop to inform how we farm and becoming more responsive and reactive rather than farming completely preventatively.” — Benji Meltzer</p><p><br></p><p>“The role that Aerobotics is playing is building that layer of insight and understanding into how the crop is performing to enable people to make these decisions.” — Benji Meltzer</p><p><br></p><p>“At its core – this product wouldn't exist without machine learning.” — Benji Meltzer</p><p><br></p><p>“Where AI can add the most value is in using technology to reduce that complexity and to downsample and simplify information into patterns and decisions that people can actually consume. 
It's almost too easy to compound that complexity and not actually solve the underlying problems.” — Benji Meltzer</p><p><br></p><p><strong>Links:</strong></p><p><a href="https://www.aerobotics.com/">Aerobotics</a></p><p><a href="https://www.linkedin.com/in/benjamin-meltzer/">Benji Meltzer on LinkedIn</a></p><p><a href="https://twitter.com/benjimeltzer">Benji Meltzer on X</a></p><p><strong>Resources for Computer Vision Teams:</strong></p><p><a href="https://www.linkedin.com/in/hdcouture/">LinkedIn</a> – Connect with Heather.</p><p><a href="https://pixelscientia.com/newsletter/">Computer Vision Insights Newsletter</a> – A biweekly newsletter to help bring the latest machine learning and computer vision research to applications in people and planetary health.</p><p><a href="http://pixelscientia.com/services/strategy-session/">Computer Vision Strategy Session</a> – Not sure how to advance your computer vision project? Get unstuck with a clear set of next steps. Schedule a 1 hour strategy session now to advance your project.</p><p><a href="https://pixelscientia.com/services/advisory/">Computer Vision Advisory Services</a> – Monthly advisory services to help you strategically plan your CV/ML capabilities, reduce the trial-and-error of model development, and get to market faster.</p>]]>
      </itunes:summary>
      <itunes:keywords>machine learning, deep learning, agriculture, drones, precision agriculture, farming</itunes:keywords>
      <itunes:explicit>No</itunes:explicit>
      <podcast:transcript url="https://share.transistor.fm/s/dd16480a/transcript.txt" type="text/plain"/>
    </item>
    <item>
      <title>Unlocking New Drugs for Neurodegenerative Diseases with Victor Hanson-Smith from Verge Genomics</title>
      <itunes:episode>50</itunes:episode>
      <podcast:episode>50</podcast:episode>
      <itunes:title>Unlocking New Drugs for Neurodegenerative Diseases with Victor Hanson-Smith from Verge Genomics</itunes:title>
      <itunes:episodeType>full</itunes:episodeType>
      <guid isPermaLink="false">863bb4fb-7e90-4251-9dbe-d9abd57df393</guid>
      <link>https://pixelscientia.com/podcast/unlocking-new-drugs-for-neurodegenerative-diseases-with-victor-hanson-smith-from-verge-genomics/</link>
      <description>
        <![CDATA[<p>Today on the Impact AI podcast, I am excited to host Victor Hanson-Smith, Head of Computational Biology at Verge Genomics. He joins me today to talk about the unlocking of new drugs for neurodegenerative diseases and their mission at Verge to make drug discovery cheaper and faster.</p><p>In our discussion, Victor tells about current Verge ventures and the important part they have in developing new drugs, the role machine learning plays, and the type of data sets they work with.  We also hear about the complexity behind “omics” data and how Verge is validating machine learning models. Victor talks passionately about the importance of team building, leadership, and company culture, and the vital role they have in establishing effective machine learning models. To hear his advice to other leaders of AI-powered startups, including the three races underway, tune into today’s episode. </p><p><br></p><p><strong>Key Points:</strong></p><ul><li>How Victor ended up at Verge Genomics as Head of Computational Biology.</li><li>How his experience with his father’s disease ignited a curiosity in him.</li><li>Verge Genomics’ ventures and why it is important in developing new drugs.</li><li>The role machine learning plays in their technology and approach. </li><li>Victor elaborates on the types of data they work with in the different models.</li><li>More about the Human Data Atlas and how it works.</li><li>He talks about the Verge Genomics model setup and functionality.</li><li>Different challenges they’ve encountered with human omics data and machine learning.</li><li>How they ensure building the most effective models using complex “omics” data.</li><li>Verge’s validation process for machine learning models. 
</li><li>How generative AI has influenced (or not influenced) advancements at Verge.</li><li>Advice from Victor to other leaders of AI-powered startups.</li><li>Where Victor sees the impact of Verge Genomics in three to five years.</li></ul><p><br></p><p><strong>Quotes:</strong></p><p><br></p><p>“We like to say that Verge Genomics is a full-stack drug discovery and development company.” — Victor Hanson-Smith</p><p><br></p><p>“This revolution in systems biology has the potential for new treatments for countless human diseases and has the potential to make drug discovery cheaper and faster. Long-term, it might even transform our fundamental relationship with the concept of disease.” — Victor Hanson-Smith</p><p><br></p><p>“One of the key differentiators for Verge is that we base our discoveries in human data.” — Victor Hanson-Smith</p><p><br></p><p>“At Verge, we often say, to succeed in humans, we start in humans, and so we go direct to the source.” — Victor Hanson-Smith</p><p><br></p><p>“We believe that no single data set or a single piece of data is sufficient for the sorts of rigorous drug discovery we’re interested in. 
Rather, our platform combines lots of different data types and layers and we’re looking for signals that are consistent across those layers.” — Victor Hanson-Smith</p><p><br></p><p>“This problem of finding the right targets, I think, is existential and one of the most upstream problems for the drug discovery industry and that’s a problem right now I don’t think is crackable by generative AI but Verge is on the frontlines of getting us closer there.” — Victor Hanson-Smith</p><p><br></p><p><strong>Links:</strong></p><p><a href="https://www.linkedin.com/in/victor-hanson-smith-43507a4/">Victor Hanson-Smith on LinkedIn</a></p><p><a href="https://twitter.com/vhsvhs">Victor Hanson-Smith on Twitter</a></p><p><a href="https://www.vergegenomics.com/about">Verge Genomics</a></p><p><a href="https://www.vergegenomics.com/approach">Converge Platform</a></p><p><strong>Resources for Computer Vision Teams:</strong></p><p><a href="https://www.linkedin.com/in/hdcouture/">LinkedIn</a> – Connect with Heather.</p><p><a href="https://pixelscientia.com/newsletter/">Computer Vision Insights Newsletter</a> – A biweekly newsletter to help bring the latest machine learning and computer vision research to applications in people and planetary health.</p><p><a href="http://pixelscientia.com/services/strategy-session/">Computer Vision Strategy Session</a> – Not sure how to advance your computer vision project? Get unstuck with a clear set of next steps. Schedule a 1 hour strategy session now to advance your project.</p><p><a href="https://pixelscientia.com/services/advisory/">Computer Vision Advisory Services</a> – Monthly advisory services to help you strategically plan your CV/ML capabilities, reduce the trial-and-error of model development, and get to market faster.</p>]]>
      </description>
      <content:encoded>
        <![CDATA[<p>Today on the Impact AI podcast, I am excited to host Victor Hanson-Smith, Head of Computational Biology at Verge Genomics. He joins me today to talk about the unlocking of new drugs for neurodegenerative diseases and their mission at Verge to make drug discovery cheaper and faster.</p><p>In our discussion, Victor tells about current Verge ventures and the important part they have in developing new drugs, the role machine learning plays, and the type of data sets they work with.  We also hear about the complexity behind “omics” data and how Verge is validating machine learning models. Victor talks passionately about the importance of team building, leadership, and company culture, and the vital role they have in establishing effective machine learning models. To hear his advice to other leaders of AI-powered startups, including the three races underway, tune into today’s episode. </p><p><br></p><p><strong>Key Points:</strong></p><ul><li>How Victor ended up at Verge Genomics as Head of Computational Biology.</li><li>How his experience with his father’s disease ignited a curiosity in him.</li><li>Verge Genomics’ ventures and why it is important in developing new drugs.</li><li>The role machine learning plays in their technology and approach. </li><li>Victor elaborates on the types of data they work with in the different models.</li><li>More about the Human Data Atlas and how it works.</li><li>He talks about the Verge Genomics model setup and functionality.</li><li>Different challenges they’ve encountered with human omics data and machine learning.</li><li>How they ensure building the most effective models using complex “omics” data.</li><li>Verge’s validation process for machine learning models. 
</li><li>How generative AI has influenced (or not influenced) advancements at Verge.</li><li>Advice from Victor to other leaders of AI-powered startups.</li><li>Where Victor sees the impact of Verge Genomics in three to five years.</li></ul><p><br></p><p><strong>Quotes:</strong></p><p><br></p><p>“We like to say that Verge Genomics is a full-stack drug discovery and development company.” — Victor Hanson-Smith</p><p><br></p><p>“This revolution in systems biology has the potential for new treatments for countless human diseases and has the potential to make drug discovery cheaper and faster. Long-term, it might even transform our fundamental relationship with the concept of disease.” — Victor Hanson-Smith</p><p><br></p><p>“One of the key differentiators for Verge is that we base our discoveries in human data.” — Victor Hanson-Smith</p><p><br></p><p>“At Verge, we often say, to succeed in humans, we start in humans, and so we go direct to the source.” — Victor Hanson-Smith</p><p><br></p><p>“We believe that no single data set or a single piece of data is sufficient for the sorts of rigorous drug discovery we’re interested in. 
Rather, our platform combines lots of different data types and layers and we’re looking for signals that are consistent across those layers.” — Victor Hanson-Smith</p><p><br></p><p>“This problem of finding the right targets, I think, is existential and one of the most upstream problems for the drug discovery industry and that’s a problem right now I don’t think is crackable by generative AI but Verge is on the frontlines of getting us closer there.” — Victor Hanson-Smith</p><p><br></p><p><strong>Links:</strong></p><p><a href="https://www.linkedin.com/in/victor-hanson-smith-43507a4/">Victor Hanson-Smith on LinkedIn</a></p><p><a href="https://twitter.com/vhsvhs">Victor Hanson-Smith on Twitter</a></p><p><a href="https://www.vergegenomics.com/about">Verge Genomics</a></p><p><a href="https://www.vergegenomics.com/approach">Converge Platform</a></p><p><strong>Resources for Computer Vision Teams:</strong></p><p><a href="https://www.linkedin.com/in/hdcouture/">LinkedIn</a> – Connect with Heather.</p><p><a href="https://pixelscientia.com/newsletter/">Computer Vision Insights Newsletter</a> – A biweekly newsletter to help bring the latest machine learning and computer vision research to applications in people and planetary health.</p><p><a href="http://pixelscientia.com/services/strategy-session/">Computer Vision Strategy Session</a> – Not sure how to advance your computer vision project? Get unstuck with a clear set of next steps. Schedule a 1 hour strategy session now to advance your project.</p><p><a href="https://pixelscientia.com/services/advisory/">Computer Vision Advisory Services</a> – Monthly advisory services to help you strategically plan your CV/ML capabilities, reduce the trial-and-error of model development, and get to market faster.</p>]]>
      </content:encoded>
      <pubDate>Mon, 18 Sep 2023 06:00:00 -0400</pubDate>
      <author>Heather D. Couture</author>
      <enclosure url="https://2.gum.fm/op3.dev/e/pdcn.co/e/pdst.fm/e/dts.podtrac.com/redirect.mp3/media.transistor.fm/db88fb9f/2c40661c.mp3" length="40599220" type="audio/mpeg"/>
      <itunes:author>Heather D. Couture</itunes:author>
      <itunes:image href="https://img.transistor.fm/PQxJAD7hKdKQtk2bEkdmcdEYiWoMxHrxUMzlIDVw-dg/rs:fill:0:0:1/w:1400/h:1400/q:60/mb:500000/aHR0cHM6Ly9pbWct/dXBsb2FkLXByb2R1/Y3Rpb24udHJhbnNp/c3Rvci5mbS9lcGlz/b2RlLzE0NjQ1MzQv/MTY5MjI5ODgwNy1h/cnR3b3JrLmpwZw.jpg"/>
      <itunes:duration>1686</itunes:duration>
      <itunes:summary>
        <![CDATA[<p>Today on the Impact AI podcast, I am excited to host Victor Hanson-Smith, Head of Computational Biology at Verge Genomics. He joins me today to talk about the unlocking of new drugs for neurodegenerative diseases and their mission at Verge to make drug discovery cheaper and faster.</p><p>In our discussion, Victor tells about current Verge ventures and the important part they have in developing new drugs, the role machine learning plays, and the type of data sets they work with.  We also hear about the complexity behind “omics” data and how Verge is validating machine learning models. Victor talks passionately about the importance of team building, leadership, and company culture, and the vital role they have in establishing effective machine learning models. To hear his advice to other leaders of AI-powered startups, including the three races underway, tune into today’s episode. </p><p><br></p><p><strong>Key Points:</strong></p><ul><li>How Victor ended up at Verge Genomics as Head of Computational Biology.</li><li>How his experience with his father’s disease ignited a curiosity in him.</li><li>Verge Genomics’ ventures and why it is important in developing new drugs.</li><li>The role machine learning plays in their technology and approach. </li><li>Victor elaborates on the types of data they work with in the different models.</li><li>More about the Human Data Atlas and how it works.</li><li>He talks about the Verge Genomics model setup and functionality.</li><li>Different challenges they’ve encountered with human omics data and machine learning.</li><li>How they ensure building the most effective models using complex “omics” data.</li><li>Verge’s validation process for machine learning models. 
</li><li>How generative AI has influenced (or not influenced) advancements at Verge.</li><li>Advice from Victor to other leaders of AI-powered startups.</li><li>Where Victor sees the impact of Verge Genomics in three to five years.</li></ul><p><br></p><p><strong>Quotes:</strong></p><p><br></p><p>“We like to say that Verge Genomics is a full-stack drug discovery and development company.” — Victor Hanson-Smith</p><p><br></p><p>“This revolution in systems biology has the potential for new treatments for countless human diseases and has the potential to make drug discovery cheaper and faster. Long-term, it might even transform our fundamental relationship with the concept of disease.” — Victor Hanson-Smith</p><p><br></p><p>“One of the key differentiators for Verge is that we base our discoveries in human data.” — Victor Hanson-Smith</p><p><br></p><p>“At Verge, we often say, to succeed in humans, we start in humans, and so we go direct to the source.” — Victor Hanson-Smith</p><p><br></p><p>“We believe that no single data set or a single piece of data is sufficient for the sorts of rigorous drug discovery we’re interested in. 
Rather, our platform combines lots of different data types and layers and we’re looking for signals that are consistent across those layers.” — Victor Hanson-Smith</p><p><br></p><p>“This problem of finding the right targets, I think, is existential and one of the most upstream problems for the drug discovery industry and that’s a problem right now I don’t think is crackable by generative AI but Verge is on the frontlines of getting us closer there.” — Victor Hanson-Smith</p><p><br></p><p><strong>Links:</strong></p><p><a href="https://www.linkedin.com/in/victor-hanson-smith-43507a4/">Victor Hanson-Smith on LinkedIn</a></p><p><a href="https://twitter.com/vhsvhs">Victor Hanson-Smith on Twitter</a></p><p><a href="https://www.vergegenomics.com/about">Verge Genomics</a></p><p><a href="https://www.vergegenomics.com/approach">Converge Platform</a></p><p><strong>Resources for Computer Vision Teams:</strong></p><p><a href="https://www.linkedin.com/in/hdcouture/">LinkedIn</a> – Connect with Heather.</p><p><a href="https://pixelscientia.com/newsletter/">Computer Vision Insights Newsletter</a> – A biweekly newsletter to help bring the latest machine learning and computer vision research to applications in people and planetary health.</p><p><a href="http://pixelscientia.com/services/strategy-session/">Computer Vision Strategy Session</a> – Not sure how to advance your computer vision project? Get unstuck with a clear set of next steps. Schedule a 1 hour strategy session now to advance your project.</p><p><a href="https://pixelscientia.com/services/advisory/">Computer Vision Advisory Services</a> – Monthly advisory services to help you strategically plan your CV/ML capabilities, reduce the trial-and-error of model development, and get to market faster.</p>]]>
      </itunes:summary>
      <itunes:keywords>machine learning, healthcare, drug discovery, neurodegenerative</itunes:keywords>
      <itunes:explicit>No</itunes:explicit>
      <podcast:transcript url="https://share.transistor.fm/s/db88fb9f/transcript.txt" type="text/plain"/>
    </item>
    <item>
      <title>Climate Intelligence from Satellite Data with Abhilasha Purwar from Blue Sky Analytics</title>
      <itunes:episode>49</itunes:episode>
      <podcast:episode>49</podcast:episode>
      <itunes:title>Climate Intelligence from Satellite Data with Abhilasha Purwar from Blue Sky Analytics</itunes:title>
      <itunes:episodeType>full</itunes:episodeType>
      <guid isPermaLink="false">e6220460-80d6-4302-a59e-28fc6aed241f</guid>
      <link>https://pixelscientia.com/podcast/climate-intelligence-from-satellite-data-with-abhilasha-purwar-from-blue-sky-analytics/</link>
      <description>
        <![CDATA[<p>In this episode, I sit down with Abhilasha Purwar, founder and CEO of Blue Sky Analytics, to explore the groundbreaking realm of climate intelligence derived from satellite data. Abhilasha's captivating journey, from engineering to environmental research and policy consulting, reveals her passion for addressing climate change through data and technology. Blue Sky Analytics is on a mission to bridge the gap between satellite data and actionable insights, monitoring everything from carbon projects to wildfire risks and infrastructure assets.</p><p>Discover the pivotal role of machine learning in analyzing vast amounts of satellite imagery and how it's transforming our ability to measure and combat climate change with precision. Abhilasha shares compelling examples of Blue Sky Analytics' models, from monitoring forests to assessing biodiversity and air quality. We dive into the challenges of satellite data procurement and the importance of open data and open source in advancing climate solutions. Find out how Blue Sky Analytics measures its impact, learn valuable advice for AI startup leaders, and get a glimpse of the inspiring future where all forests and lakes become digital public assets worldwide. 
Tune in now to discover the power of satellite data with pioneer Abhilasha Purwar!</p><p><br></p><p><strong>Key Points:</strong></p><ul><li>Abhilasha's background and her journey to founding Blue Sky Analytics.</li><li>Blue Sky Analytics and how the company is helping combat climate change.</li><li>Discover the role of machine learning at Blue Sky Analytics.</li><li>Exciting applications of the Blue Sky Analytics models.</li><li>The challenges and hurdles of relying on remote sensing data.</li><li>Hear why open data and open-source software are essential.</li><li>How Blue Sky Analytics plans to bridge the paywall gap.</li><li>Fascinating and potential future applications of satellite data.</li><li>Insights into the single performance indicator Blue Sky Analytics uses.</li><li>She shares key advice for leaders of AI-powered startups.</li><li>The vision that Blue Sky Analytics has for the future.</li></ul><p><br></p><p><strong>Quotes:</strong></p><p>“What Blue Sky really does is effectively monitor the pulse of the planet.” — Abhilasha Purwar</p><p><br></p><p>“Objectivity and numbers ground us and they serve as some sort of truth and some sort of objectivity against all kinds of these emotionally-driven debates.” — Abhilasha Purwar</p><p><br></p><p>“What machines are able to do in one day? 
It would take you and I 10,000 years or something to do.” — Abhilasha Purwar</p><p><br></p><p>“The bottleneck of building out that trust within the community, building out that trust for the community with other stakeholders, can really be solved if the community was to collaborate with each other.” — Abhilasha Purwar</p><p><br></p><p><strong>Links:</strong></p><p><a href="https://www.linkedin.com/in/abhilashapurwar/">Abhilasha Purwar on LinkedIn</a></p><p><a href="https://twitter.com/blueskylab">Abhilasha Purwar on X</a></p><p><a href="https://blueskyhq.io">Blue Sky Analytics</a></p><p><strong>Resources for Computer Vision Teams:</strong></p><p><a href="https://www.linkedin.com/in/hdcouture/">LinkedIn</a> – Connect with Heather.</p><p><a href="https://pixelscientia.com/newsletter/">Computer Vision Insights Newsletter</a> – A biweekly newsletter to help bring the latest machine learning and computer vision research to applications in people and planetary health.</p><p><a href="http://pixelscientia.com/services/strategy-session/">Computer Vision Strategy Session</a> – Not sure how to advance your computer vision project? Get unstuck with a clear set of next steps. Schedule a 1 hour strategy session now to advance your project.</p><p><a href="https://pixelscientia.com/services/advisory/">Computer Vision Advisory Services</a> – Monthly advisory services to help you strategically plan your CV/ML capabilities, reduce the trial-and-error of model development, and get to market faster.</p>]]>
      </description>
      <content:encoded>
        <![CDATA[<p>In this episode, I sit down with Abhilasha Purwar, founder and CEO of Blue Sky Analytics, to explore the groundbreaking realm of climate intelligence derived from satellite data. Abhilasha's captivating journey, from engineering to environmental research and policy consulting, reveals her passion for addressing climate change through data and technology. Blue Sky Analytics is on a mission to bridge the gap between satellite data and actionable insights, monitoring everything from carbon projects to wildfire risks and infrastructure assets.</p><p>Discover the pivotal role of machine learning in analyzing vast amounts of satellite imagery and how it's transforming our ability to measure and combat climate change with precision. Abhilasha shares compelling examples of Blue Sky Analytics' models, from monitoring forests to assessing biodiversity and air quality. We dive into the challenges of satellite data procurement and the importance of open data and open source in advancing climate solutions. Find out how Blue Sky Analytics measures its impact, learn valuable advice for AI startup leaders, and get a glimpse of the inspiring future where all forests and lakes become digital public assets worldwide. 
Tune in now to discover the power of satellite data with pioneer Abhilasha Purwar!</p><p><br></p><p><strong>Key Points:</strong></p><ul><li>Abhilasha's background and her journey to founding Blue Sky Analytics.</li><li>Blue Sky Analytics and how the company is helping combat climate change.</li><li>Discover the role of machine learning at Blue Sky Analytics.</li><li>Exciting applications of the Blue Sky Analytics models.</li><li>The challenges and hurdles of relying on remote sensing data.</li><li>Hear why open data and open-source software are essential.</li><li>How Blue Sky Analytics plans to bridge the paywall gap.</li><li>Fascinating and potential future applications of satellite data.</li><li>Insights into the single performance indicator Blue Sky Analytics uses.</li><li>She shares key advice for leaders of AI-powered startups.</li><li>The vision that Blue Sky Analytics has for the future.</li></ul><p><br></p><p><strong>Quotes:</strong></p><p>“What Blue Sky really does is effectively monitor the pulse of the planet.” — Abhilasha Purwar</p><p><br></p><p>“Objectivity and numbers ground us and they serve as some sort of truth and some sort of objectivity against all kinds of these emotionally-driven debates.” — Abhilasha Purwar</p><p><br></p><p>“What machines are able to do in one day? 
It would take you and I 10,000 years or something to do.” — Abhilasha Purwar</p><p><br></p><p>“The bottleneck of building out that trust within the community, building out that trust for the community with other stakeholders, can really be solved if the community was to collaborate with each other.” — Abhilasha Purwar</p><p><br></p><p><strong>Links:</strong></p><p><a href="https://www.linkedin.com/in/abhilashapurwar/">Abhilasha Purwar on LinkedIn</a></p><p><a href="https://twitter.com/blueskylab">Abhilasha Purwar on X</a></p><p><a href="https://blueskyhq.io">Blue Sky Analytics</a></p><p><strong>Resources for Computer Vision Teams:</strong></p><p><a href="https://www.linkedin.com/in/hdcouture/">LinkedIn</a> – Connect with Heather.</p><p><a href="https://pixelscientia.com/newsletter/">Computer Vision Insights Newsletter</a> – A biweekly newsletter to help bring the latest machine learning and computer vision research to applications in people and planetary health.</p><p><a href="http://pixelscientia.com/services/strategy-session/">Computer Vision Strategy Session</a> – Not sure how to advance your computer vision project? Get unstuck with a clear set of next steps. Schedule a 1 hour strategy session now to advance your project.</p><p><a href="https://pixelscientia.com/services/advisory/">Computer Vision Advisory Services</a> – Monthly advisory services to help you strategically plan your CV/ML capabilities, reduce the trial-and-error of model development, and get to market faster.</p>]]>
      </content:encoded>
      <pubDate>Mon, 11 Sep 2023 06:00:00 -0400</pubDate>
      <author>Heather D. Couture</author>
      <enclosure url="https://2.gum.fm/op3.dev/e/pdcn.co/e/pdst.fm/e/dts.podtrac.com/redirect.mp3/media.transistor.fm/28cc2926/d0aafeeb.mp3" length="42536751" type="audio/mpeg"/>
      <itunes:author>Heather D. Couture</itunes:author>
      <itunes:image href="https://img.transistor.fm/CYwCVBHD0FCR0v8OBnoM7otB3E1pkCcBI2-n8ZIgbco/rs:fill:0:0:1/w:1400/h:1400/q:60/mb:500000/aHR0cHM6Ly9pbWct/dXBsb2FkLXByb2R1/Y3Rpb24udHJhbnNp/c3Rvci5mbS9lcGlz/b2RlLzE0ODkwNDYv/MTY5Mzg1MzQ3My1h/cnR3b3JrLmpwZw.jpg"/>
      <itunes:duration>1766</itunes:duration>
      <itunes:summary>
        <![CDATA[<p>In this episode, I sit down with Abhilasha Purwar, founder and CEO of Blue Sky Analytics, to explore the groundbreaking realm of climate intelligence derived from satellite data. Abhilasha's captivating journey, from engineering to environmental research and policy consulting, reveals her passion for addressing climate change through data and technology. Blue Sky Analytics is on a mission to bridge the gap between satellite data and actionable insights, monitoring everything from carbon projects to wildfire risks and infrastructure assets.</p><p>Discover the pivotal role of machine learning in analyzing vast amounts of satellite imagery and how it's transforming our ability to measure and combat climate change with precision. Abhilasha shares compelling examples of Blue Sky Analytics' models, from monitoring forests to assessing biodiversity and air quality. We dive into the challenges of satellite data procurement and the importance of open data and open source in advancing climate solutions. Find out how Blue Sky Analytics measures its impact, learn valuable advice for AI startup leaders, and get a glimpse of the inspiring future where all forests and lakes become digital public assets worldwide. 
Tune in now to discover the power of satellite data with pioneer Abhilasha Purwar!</p><p><br></p><p><strong>Key Points:</strong></p><ul><li>Abhilasha's background and her journey to founding Blue Sky Analytics.</li><li>Blue Sky Analytics and how the company is helping combat climate change.</li><li>Discover the role of machine learning at Blue Sky Analytics.</li><li>Exciting applications of the Blue Sky Analytics models.</li><li>The challenges and hurdles of relying on remote sensing data.</li><li>Hear why open data and open-source software are essential.</li><li>How Blue Sky Analytics plans to bridge the paywall gap.</li><li>Fascinating and potential future applications of satellite data.</li><li>Insights into the single performance indicator Blue Sky Analytics uses.</li><li>She shares key advice for leaders of AI-powered startups.</li><li>The vision that Blue Sky Analytics has for the future.</li></ul><p><br></p><p><strong>Quotes:</strong></p><p>“What Blue Sky really does is effectively monitor the pulse of the planet.” — Abhilasha Purwar</p><p><br></p><p>“Objectivity and numbers ground us and they serve as some sort of truth and some sort of objectivity against all kinds of these emotionally-driven debates.” — Abhilasha Purwar</p><p><br></p><p>“What machines are able to do in one day? 
It would take you and I 10,000 years or something to do.” — Abhilasha Purwar</p><p><br></p><p>“The bottleneck of building out that trust within the community, building out that trust for the community with other stakeholders, can really be solved if the community was to collaborate with each other.” — Abhilasha Purwar</p><p><br></p><p><strong>Links:</strong></p><p><a href="https://www.linkedin.com/in/abhilashapurwar/">Abhilasha Purwar on LinkedIn</a></p><p><a href="https://twitter.com/blueskylab">Abhilasha Purwar on X</a></p><p><a href="https://blueskyhq.io">Blue Sky Analytics</a></p><p><strong>Resources for Computer Vision Teams:</strong></p><p><a href="https://www.linkedin.com/in/hdcouture/">LinkedIn</a> – Connect with Heather.</p><p><a href="https://pixelscientia.com/newsletter/">Computer Vision Insights Newsletter</a> – A biweekly newsletter to help bring the latest machine learning and computer vision research to applications in people and planetary health.</p><p><a href="http://pixelscientia.com/services/strategy-session/">Computer Vision Strategy Session</a> – Not sure how to advance your computer vision project? Get unstuck with a clear set of next steps. Schedule a 1 hour strategy session now to advance your project.</p><p><a href="https://pixelscientia.com/services/advisory/">Computer Vision Advisory Services</a> – Monthly advisory services to help you strategically plan your CV/ML capabilities, reduce the trial-and-error of model development, and get to market faster.</p>]]>
      </itunes:summary>
      <itunes:keywords>machine learning, deep learning, remote sensing, satellite, geospatial, climate intelligence</itunes:keywords>
      <itunes:explicit>No</itunes:explicit>
      <podcast:transcript url="https://share.transistor.fm/s/28cc2926/transcript.txt" type="text/plain"/>
    </item>
    <item>
      <title>Targeted Cancer Treatments with Rafael Rosengarten from Genialis</title>
      <itunes:episode>48</itunes:episode>
      <podcast:episode>48</podcast:episode>
      <itunes:title>Targeted Cancer Treatments with Rafael Rosengarten from Genialis</itunes:title>
      <itunes:episodeType>full</itunes:episodeType>
      <guid isPermaLink="false">c264db41-66f7-44dd-aa55-91ab0db49112</guid>
      <link>https://pixelscientia.com/podcast/targeted-cancer-treatments-with-rafael-rosengarten-from-genialis/</link>
      <description>
        <![CDATA[<p>New research and technology are radically transforming cancer treatment, and, today, we find out how. I am joined by Genialis CEO and Co-Founder, Rafael Rosengarten to discuss his company’s mission to “outsmart cancer.” Genialis is revolutionizing cancer care by developing AI models that decode the biology behind different types of cancer and identify the most effective therapies for individual patients.</p><p>In this episode, we discover how Genialis’ innovative approach of turning RNA sequencing data into tumor phenotype classification is remolding the landscape of precision medicine. Rafael explains the company’s methods of handling the high dimensionality and sparseness of sequencing data while addressing bias issues, filling us in on why they use shallower artificial intelligence architectures for algorithm training and more. Join us as we explore the cutting-edge world of personalized cancer treatments that are shaping the future of oncology.</p><p><br></p><p><strong>Key Points:</strong></p><ul><li>Genialis CEO and Co-Founder, Rafael Rosengarten’s background; what led him to Genialis.</li><li>How Genialis applies machine learning to help patients find personalized cancer treatments.</li><li>Their collaboration with drug and diagnostics companies to deploy their models.</li><li>How the models use RNA sequencing data to predict and classify tumor phenotypes.</li><li>The challenges encountered when training models with sequencing data.</li><li>Rafael defines sequencing data.</li><li>Why RNA sequencing for clinical applications is considered cutting-edge.</li><li>Genialis’ methods for handling the high dimensionality and sparseness of sequencing data.</li><li>The various sources of bias and how they have addressed these issues.</li><li>Why they use shallower artificial intelligence architectures for algorithm training.</li><li>How the FDA’s regulatory process affects how Genialis develops and validates its models.</li><li>The benefits the 
Genialis team has seen from publishing research articles.</li><li>How they measure the impact of their technology.</li><li>Rafael’s advice to other leaders of AI-powered startups.</li><li>The hyper-commoditization of AI technologies.</li><li>Rafael predicts the future impact of Genialis and shares his goals for the company.</li></ul><p><br></p><p><strong>Quotes:</strong></p><p>“[Genialis applies] machine learning to try to help patients find the best drugs for their disease, to help realize the promise of precision medicine.” — Rafael Rosengarten</p><p><br></p><p>“The models are learning the fundamental biological nature of the disease. From that, we can extrapolate what the best intervention will be.” — Rafael Rosengarten</p><p><br></p><p>“Not all genes have detectable expression at once. Certainly, not all genes are going to be informative. We've built really beautiful software that allows us to aggregate these kinds of sequencing data, to process them in a very uniform way.” — Rafael Rosengarten</p><p><br></p><p>“It really is a pan-cancer model, even though it was trained on a data set that was just gastric cancer. 
And it works on RNA sequencing of all different chemistries, even though it was trained on microarray.” — Rafael Rosengarten</p><p><br></p><p>“The key with algorithm training, of course, is to try to avoid what's known as overfitting.” — Rafael Rosengarten</p><p><br></p><p>“Every phenotype that our model predicts, whether it's phenotype A, B, C, or D, has a different therapeutic hypothesis.” — Rafael Rosengarten</p><p><br></p><p>“AI technologies right now are becoming hyper-commoditized.” — Rafael Rosengarten</p><p><br></p><p>“It is still possible for small companies to come up with really innovative algorithms — but for the most part, it really matters how you deploy these technologies.” — Rafael Rosengarten</p><p><br></p><p><strong>Links:</strong></p><p><a href="https://www.linkedin.com/in/rrosengarten/">Rafael Rosengarten on LinkedIn</a></p><p><a href="https://twitter.com/rafecooks">Rafael Rosengarten on Twitter</a></p><p><a href="https://www.genialis.com/">Genialis</a></p><p><a href="https://www.linkedin.com/company/genialis/">Genialis on LinkedIn</a></p><p><a href="https://twitter.com/genialis">Genialis on Twitter</a></p><p><a href="https://podcasts.apple.com/us/podcast/talking-precision-medicine/id1448530203">Talking Precision Medicine Podcast</a></p><p><strong>Resources for Computer Vision Teams:</strong></p><p><a href="https://www.linkedin.com/in/hdcouture/">LinkedIn</a> – Connect with Heather.</p><p><a href="https://pixelscientia.com/newsletter/">Computer Vision Insights Newsletter</a> – A biweekly newsletter to help bring the latest machine learning and computer vision research to applications in people and planetary health.</p><p><a href="http://pixelscientia.com/services/strategy-session/">Computer Vision Strategy Session</a> – Not sure how to advance your computer vision project? Get unstuck with a clear set of next steps. 
Schedule a 1-hour strategy session now to advance your project.</p><p><a href="https://pixelscientia.com/services/advisory/">Computer Vision Advisory Services</a> – Monthly advisory services to help you strategically plan your CV/ML capabilities, reduce the trial-and-error of model development, and get to market faster.</p>]]>
      </description>
      <content:encoded>
        <![CDATA[<p>New research and technology are radically transforming cancer treatment, and, today, we find out how. I am joined by Genialis CEO and Co-Founder, Rafael Rosengarten to discuss his company’s mission to “outsmart cancer.” Genialis is revolutionizing cancer care by developing AI models that decode the biology behind different types of cancer and identify the most effective therapies for individual patients.</p><p>In this episode, we discover how Genialis’ innovative approach of turning RNA sequencing data into tumor phenotype classification is remolding the landscape of precision medicine. Rafael explains the company’s methods of handling the high dimensionality and sparseness of sequencing data while addressing bias issues, filling us in on why they use shallower artificial intelligence architectures for algorithm training and more. Join us as we explore the cutting-edge world of personalized cancer treatments that are shaping the future of oncology.</p><p><br></p><p><strong>Key Points:</strong></p><ul><li>Genialis CEO and Co-Founder, Rafael Rosengarten’s background; what led him to Genialis.</li><li>How Genialis applies machine learning to help patients find personalized cancer treatments.</li><li>Their collaboration with drug and diagnostics companies to deploy their models.</li><li>How the models use RNA sequencing data to predict and classify tumor phenotypes.</li><li>The challenges encountered when training models with sequencing data.</li><li>Rafael defines sequencing data.</li><li>Why RNA sequencing for clinical applications is considered cutting-edge.</li><li>Genialis’ methods for handling the high dimensionality and sparseness of sequencing data.</li><li>The various sources of bias and how they have addressed these issues.</li><li>Why they use shallower artificial intelligence architectures for algorithm training.</li><li>How the FDA’s regulatory process affects how Genialis develops and validates its models.</li><li>The benefits the 
Genialis team has seen from publishing research articles.</li><li>How they measure the impact of their technology.</li><li>Rafael’s advice to other leaders of AI-powered startups.</li><li>The hyper-commoditization of AI technologies.</li><li>Rafael predicts the future impact of Genialis and shares his goals for the company.</li></ul><p><br></p><p><strong>Quotes:</strong></p><p>“[Genialis applies] machine learning to try to help patients find the best drugs for their disease, to help realize the promise of precision medicine.” — Rafael Rosengarten</p><p><br></p><p>“The models are learning the fundamental biological nature of the disease. From that, we can extrapolate what the best intervention will be.” — Rafael Rosengarten</p><p><br></p><p>“Not all genes have detectable expression at once. Certainly, not all genes are going to be informative. We've built really beautiful software that allows us to aggregate these kinds of sequencing data, to process them in a very uniform way.” — Rafael Rosengarten</p><p><br></p><p>“It really is a pan-cancer model, even though it was trained on a data set that was just gastric cancer. 
And it works on RNA sequencing of all different chemistries, even though it was trained on microarray.” — Rafael Rosengarten</p><p><br></p><p>“The key with algorithm training, of course, is to try to avoid what's known as overfitting.” — Rafael Rosengarten</p><p><br></p><p>“Every phenotype that our model predicts, whether it's phenotype A, B, C, or D, has a different therapeutic hypothesis.” — Rafael Rosengarten</p><p><br></p><p>“AI technologies right now are becoming hyper-commoditized.” — Rafael Rosengarten</p><p><br></p><p>“It is still possible for small companies to come up with really innovative algorithms — but for the most part, it really matters how you deploy these technologies.” — Rafael Rosengarten</p><p><br></p><p><strong>Links:</strong></p><p><a href="https://www.linkedin.com/in/rrosengarten/">Rafael Rosengarten on LinkedIn</a></p><p><a href="https://twitter.com/rafecooks">Rafael Rosengarten on Twitter</a></p><p><a href="https://www.genialis.com/">Genialis</a></p><p><a href="https://www.linkedin.com/company/genialis/">Genialis on LinkedIn</a></p><p><a href="https://twitter.com/genialis">Genialis on Twitter</a></p><p><a href="https://podcasts.apple.com/us/podcast/talking-precision-medicine/id1448530203">Talking Precision Medicine Podcast</a></p><p><strong>Resources for Computer Vision Teams:</strong></p><p><a href="https://www.linkedin.com/in/hdcouture/">LinkedIn</a> – Connect with Heather.</p><p><a href="https://pixelscientia.com/newsletter/">Computer Vision Insights Newsletter</a> – A biweekly newsletter to help bring the latest machine learning and computer vision research to applications in people and planetary health.</p><p><a href="http://pixelscientia.com/services/strategy-session/">Computer Vision Strategy Session</a> – Not sure how to advance your computer vision project? Get unstuck with a clear set of next steps. 
Schedule a 1-hour strategy session now to advance your project.</p><p><a href="https://pixelscientia.com/services/advisory/">Computer Vision Advisory Services</a> – Monthly advisory services to help you strategically plan your CV/ML capabilities, reduce the trial-and-error of model development, and get to market faster.</p>]]>
      </content:encoded>
      <pubDate>Mon, 04 Sep 2023 06:00:00 -0400</pubDate>
      <author>Heather D. Couture</author>
      <enclosure url="https://2.gum.fm/op3.dev/e/pdcn.co/e/pdst.fm/e/dts.podtrac.com/redirect.mp3/media.transistor.fm/5d1df595/9ddb16ca.mp3" length="35091327" type="audio/mpeg"/>
      <itunes:author>Heather D. Couture</itunes:author>
      <itunes:image href="https://img.transistor.fm/-GuNKL0jUOMh8AyI_fiW9VercExXr7TCNO2x7GwCh6I/rs:fill:0:0:1/w:1400/h:1400/q:60/mb:500000/aHR0cHM6Ly9pbWct/dXBsb2FkLXByb2R1/Y3Rpb24udHJhbnNp/c3Rvci5mbS9lcGlz/b2RlLzE0MDY3MDcv/MTY4ODQzNzgzOC1h/cnR3b3JrLmpwZw.jpg"/>
      <itunes:duration>1458</itunes:duration>
      <itunes:summary>
        <![CDATA[<p>New research and technology are radically transforming cancer treatment, and, today, we find out how. I am joined by Genialis CEO and Co-Founder, Rafael Rosengarten to discuss his company’s mission to “outsmart cancer.” Genialis is revolutionizing cancer care by developing AI models that decode the biology behind different types of cancer and identify the most effective therapies for individual patients.</p><p>In this episode, we discover how Genialis’ innovative approach of turning RNA sequencing data into tumor phenotype classification is remolding the landscape of precision medicine. Rafael explains the company’s methods of handling the high dimensionality and sparseness of sequencing data while addressing bias issues, filling us in on why they use shallower artificial intelligence architectures for algorithm training and more. Join us as we explore the cutting-edge world of personalized cancer treatments that are shaping the future of oncology.</p><p><br></p><p><strong>Key Points:</strong></p><ul><li>Genialis CEO and Co-Founder, Rafael Rosengarten’s background; what led him to Genialis.</li><li>How Genialis applies machine learning to help patients find personalized cancer treatments.</li><li>Their collaboration with drug and diagnostics companies to deploy their models.</li><li>How the models use RNA sequencing data to predict and classify tumor phenotypes.</li><li>The challenges encountered when training models with sequencing data.</li><li>Rafael defines sequencing data.</li><li>Why RNA sequencing for clinical applications is considered cutting-edge.</li><li>Genialis’ methods for handling the high dimensionality and sparseness of sequencing data.</li><li>The various sources of bias and how they have addressed these issues.</li><li>Why they use shallower artificial intelligence architectures for algorithm training.</li><li>How the FDA’s regulatory process affects how Genialis develops and validates its models.</li><li>The benefits the 
Genialis team has seen from publishing research articles.</li><li>How they measure the impact of their technology.</li><li>Rafael’s advice to other leaders of AI-powered startups.</li><li>The hyper-commoditization of AI technologies.</li><li>Rafael predicts the future impact of Genialis and shares his goals for the company.</li></ul><p><br></p><p><strong>Quotes:</strong></p><p>“[Genialis applies] machine learning to try to help patients find the best drugs for their disease, to help realize the promise of precision medicine.” — Rafael Rosengarten</p><p><br></p><p>“The models are learning the fundamental biological nature of the disease. From that, we can extrapolate what the best intervention will be.” — Rafael Rosengarten</p><p><br></p><p>“Not all genes have detectable expression at once. Certainly, not all genes are going to be informative. We've built really beautiful software that allows us to aggregate these kinds of sequencing data, to process them in a very uniform way.” — Rafael Rosengarten</p><p><br></p><p>“It really is a pan-cancer model, even though it was trained on a data set that was just gastric cancer. 
And it works on RNA sequencing of all different chemistries, even though it was trained on microarray.” — Rafael Rosengarten</p><p><br></p><p>“The key with algorithm training, of course, is to try to avoid what's known as overfitting.” — Rafael Rosengarten</p><p><br></p><p>“Every phenotype that our model predicts, whether it's phenotype A, B, C, or D, has a different therapeutic hypothesis.” — Rafael Rosengarten</p><p><br></p><p>“AI technologies right now are becoming hyper-commoditized.” — Rafael Rosengarten</p><p><br></p><p>“It is still possible for small companies to come up with really innovative algorithms — but for the most part, it really matters how you deploy these technologies.” — Rafael Rosengarten</p><p><br></p><p><strong>Links:</strong></p><p><a href="https://www.linkedin.com/in/rrosengarten/">Rafael Rosengarten on LinkedIn</a></p><p><a href="https://twitter.com/rafecooks">Rafael Rosengarten on Twitter</a></p><p><a href="https://www.genialis.com/">Genialis</a></p><p><a href="https://www.linkedin.com/company/genialis/">Genialis on LinkedIn</a></p><p><a href="https://twitter.com/genialis">Genialis on Twitter</a></p><p><a href="https://podcasts.apple.com/us/podcast/talking-precision-medicine/id1448530203">Talking Precision Medicine Podcast</a></p><p><strong>Resources for Computer Vision Teams:</strong></p><p><a href="https://www.linkedin.com/in/hdcouture/">LinkedIn</a> – Connect with Heather.</p><p><a href="https://pixelscientia.com/newsletter/">Computer Vision Insights Newsletter</a> – A biweekly newsletter to help bring the latest machine learning and computer vision research to applications in people and planetary health.</p><p><a href="http://pixelscientia.com/services/strategy-session/">Computer Vision Strategy Session</a> – Not sure how to advance your computer vision project? Get unstuck with a clear set of next steps. 
Schedule a 1-hour strategy session now to advance your project.</p><p><a href="https://pixelscientia.com/services/advisory/">Computer Vision Advisory Services</a> – Monthly advisory services to help you strategically plan your CV/ML capabilities, reduce the trial-and-error of model development, and get to market faster.</p>]]>
      </itunes:summary>
      <itunes:keywords>machine learning, precision medicine, cancer research</itunes:keywords>
      <itunes:explicit>No</itunes:explicit>
      <podcast:transcript url="https://share.transistor.fm/s/5d1df595/transcript.txt" type="text/plain"/>
    </item>
    <item>
      <title>Curating Medical Image Datasets with Jie Wu from Segmed</title>
      <itunes:episode>47</itunes:episode>
      <podcast:episode>47</podcast:episode>
      <itunes:title>Curating Medical Image Datasets with Jie Wu from Segmed</itunes:title>
      <itunes:episodeType>full</itunes:episodeType>
      <guid isPermaLink="false">2465d0e9-2fcb-4d9c-87a0-a83372ec2fef</guid>
      <link>https://pixelscientia.com/podcast/curating-medical-image-datasets-with-jie-wu-from-segmed/</link>
      <description>
        <![CDATA[<p>The accelerated development of medical AI could be life-changing for patients. Unfortunately, accessing large amounts of diverse, standardized data has been a major stumbling block to progress. That’s where Segmed comes in, a platform that allows researchers to access diverse, high-quality, and de-identified medical imaging data. Crucially, Segmed’s platform also provides data for medical AI training and validation.</p><p>I am joined today, by Segmed’s co-founder, Jie Wu, to discuss how they are solving key data issues to rapidly accelerate medical AI development. You’ll hear Jie break down some of the biggest challenges in curating medical image datasets — including the extra computational power needed to handle high-res medical images, like CT scans — and how they are addressing these obstacles. Jie also takes the time to emphasize the need for diversity when curating medical image datasets and the importance of mitigating bias during the data curation phase. To learn more about Segmed and how they are contributing to the development of medical AI, be sure to tune in today!</p><p><br></p><p><strong>Key Points:</strong></p><ul><li>A warm welcome to Jie Wu, co-founder of Segmed.</li><li>Insight into how Segmed is solving data issues to accelerate medical AI development.</li><li>Why solutions to these data issues are crucial for medical research.</li><li>Segmed’s focus on medical imaging data.</li><li>Their approach to different imaging modalities.</li><li>An overview of the key challenges in curating medical image datasets.</li><li>How Segmed determines the amount of data they will need.</li><li>Best practices for curating a training set of medical images.</li><li>Why collecting a diverse range of images is essential.</li><li>An overview of how the quality of labels is assessed by experts.</li><li>How imaging modality influences Segmed’s approach to creating datasets.</li><li>The variations in datasets across different imaging 
pathologies.</li><li>Special considerations that inform the validation set versus the training set.</li><li>How bias manifests in models trained on medical images.</li><li>Steps that can be taken to mitigate bias during the data curation phase.</li><li>How the need for diverse datasets has increased along with greater awareness of bias.</li><li>Jie’s thoughts on the future of foundation models in the medical AI space.</li></ul><p><br></p><p><strong>Quotes:</strong></p><p>“A high-resolution of CT can take up to several gigabytes of storage itself.” — Jie Wu</p><p><br></p><p>“I think the most important piece is actually to collect as diversely as possible. So I ask that given the budget limit or maybe time limit, the size of the data set will be limited but it should be at least representative of the target population and targeted practice.” — Jie Wu</p><p><br></p><p>“The best quality labels are curated by experts and it is curated by multiple experts.” — Jie Wu</p><p><br></p><p>“A 3D image stores much more information than the 2D images, so you need less data for that.” — Jie Wu</p><p><br></p><p>“The external validation datasets require much more carefully curated datasets and much higher quality labels, and also it needs to be representative of the population, of the institutions, and also geographical locations.” — Jie Wu</p><p><br></p><p>“We hope that we can enter into the development of AI and make these algorithms go to market faster and benefit more people.” — Jie Wu</p><p><br></p><p><strong>Links:</strong></p><p><a href="https://www.linkedin.com/in/jie-w-467600b3/">Jie Wu on LinkedIn</a></p><p><a href="https://www.segmed.ai/">Segmed</a></p><p><strong>Resources for Computer Vision Teams:</strong></p><p><a href="https://www.linkedin.com/in/hdcouture/">LinkedIn</a> – Connect with Heather.</p><p><a href="https://pixelscientia.com/newsletter/">Computer Vision Insights Newsletter</a> – A biweekly newsletter to help bring the latest machine learning and computer 
vision research to applications in people and planetary health.</p><p><a href="http://pixelscientia.com/services/strategy-session/">Computer Vision Strategy Session</a> – Not sure how to advance your computer vision project? Get unstuck with a clear set of next steps. Schedule a 1-hour strategy session now to advance your project.</p><p><a href="https://pixelscientia.com/services/advisory/">Computer Vision Advisory Services</a> – Monthly advisory services to help you strategically plan your CV/ML capabilities, reduce the trial-and-error of model development, and get to market faster.</p>]]>
      </description>
      <content:encoded>
        <![CDATA[<p>The accelerated development of medical AI could be life-changing for patients. Unfortunately, accessing large amounts of diverse, standardized data has been a major stumbling block to progress. That’s where Segmed comes in, a platform that allows researchers to access diverse, high-quality, and de-identified medical imaging data. Crucially, Segmed’s platform also provides data for medical AI training and validation.</p><p>I am joined today, by Segmed’s co-founder, Jie Wu, to discuss how they are solving key data issues to rapidly accelerate medical AI development. You’ll hear Jie break down some of the biggest challenges in curating medical image datasets — including the extra computational power needed to handle high-res medical images, like CT scans — and how they are addressing these obstacles. Jie also takes the time to emphasize the need for diversity when curating medical image datasets and the importance of mitigating bias during the data curation phase. To learn more about Segmed and how they are contributing to the development of medical AI, be sure to tune in today!</p><p><br></p><p><strong>Key Points:</strong></p><ul><li>A warm welcome to Jie Wu, co-founder of Segmed.</li><li>Insight into how Segmed is solving data issues to accelerate medical AI development.</li><li>Why solutions to these data issues are crucial for medical research.</li><li>Segmed’s focus on medical imaging data.</li><li>Their approach to different imaging modalities.</li><li>An overview of the key challenges in curating medical image datasets.</li><li>How Segmed determines the amount of data they will need.</li><li>Best practices for curating a training set of medical images.</li><li>Why collecting a diverse range of images is essential.</li><li>An overview of how the quality of labels is assessed by experts.</li><li>How imaging modality influences Segmed’s approach to creating datasets.</li><li>The variations in datasets across different imaging 
pathologies.</li><li>Special considerations that inform the validation set versus the training set.</li><li>How bias manifests in models trained on medical images.</li><li>Steps that can be taken to mitigate bias during the data curation phase.</li><li>How the need for diverse datasets has increased along with greater awareness of bias.</li><li>Jie’s thoughts on the future of foundation models in the medical AI space.</li></ul><p><br></p><p><strong>Quotes:</strong></p><p>“A high-resolution of CT can take up to several gigabytes of storage itself.” — Jie Wu</p><p><br></p><p>“I think the most important piece is actually to collect as diversely as possible. So I ask that given the budget limit or maybe time limit, the size of the data set will be limited but it should be at least representative of the target population and targeted practice.” — Jie Wu</p><p><br></p><p>“The best quality labels are curated by experts and it is curated by multiple experts.” — Jie Wu</p><p><br></p><p>“A 3D image stores much more information than the 2D images, so you need less data for that.” — Jie Wu</p><p><br></p><p>“The external validation datasets require much more carefully curated datasets and much higher quality labels, and also it needs to be representative of the population, of the institutions, and also geographical locations.” — Jie Wu</p><p><br></p><p>“We hope that we can enter into the development of AI and make these algorithms go to market faster and benefit more people.” — Jie Wu</p><p><br></p><p><strong>Links:</strong></p><p><a href="https://www.linkedin.com/in/jie-w-467600b3/">Jie Wu on LinkedIn</a></p><p><a href="https://www.segmed.ai/">Segmed</a></p><p><strong>Resources for Computer Vision Teams:</strong></p><p><a href="https://www.linkedin.com/in/hdcouture/">LinkedIn</a> – Connect with Heather.</p><p><a href="https://pixelscientia.com/newsletter/">Computer Vision Insights Newsletter</a> – A biweekly newsletter to help bring the latest machine learning and computer 
vision research to applications in people and planetary health.</p><p><a href="http://pixelscientia.com/services/strategy-session/">Computer Vision Strategy Session</a> – Not sure how to advance your computer vision project? Get unstuck with a clear set of next steps. Schedule a 1-hour strategy session now to advance your project.</p><p><a href="https://pixelscientia.com/services/advisory/">Computer Vision Advisory Services</a> – Monthly advisory services to help you strategically plan your CV/ML capabilities, reduce the trial-and-error of model development, and get to market faster.</p>]]>
      </content:encoded>
      <pubDate>Mon, 28 Aug 2023 06:00:00 -0400</pubDate>
      <author>Heather D. Couture</author>
      <enclosure url="https://2.gum.fm/op3.dev/e/pdcn.co/e/pdst.fm/e/dts.podtrac.com/redirect.mp3/media.transistor.fm/8679c608/5b7c0042.mp3" length="24457839" type="audio/mpeg"/>
      <itunes:author>Heather D. Couture</itunes:author>
      <itunes:image href="https://img.transistor.fm/j0FGebmwoYGFm_gqzUXsZ_0571aYP2JRarrPM5DTffU/rs:fill:0:0:1/w:1400/h:1400/q:60/mb:500000/aHR0cHM6Ly9pbWct/dXBsb2FkLXByb2R1/Y3Rpb24udHJhbnNp/c3Rvci5mbS9lcGlz/b2RlLzE0NjQ1Mjkv/MTY5MjI5ODM5My1h/cnR3b3JrLmpwZw.jpg"/>
      <itunes:duration>1013</itunes:duration>
      <itunes:summary>
        <![CDATA[<p>The accelerated development of medical AI could be life-changing for patients. Unfortunately, accessing large amounts of diverse, standardized data has been a major stumbling block to progress. That’s where Segmed comes in, a platform that allows researchers to access diverse, high-quality, and de-identified medical imaging data. Crucially, Segmed’s platform also provides data for medical AI training and validation.</p><p>I am joined today, by Segmed’s co-founder, Jie Wu, to discuss how they are solving key data issues to rapidly accelerate medical AI development. You’ll hear Jie break down some of the biggest challenges in curating medical image datasets — including the extra computational power needed to handle high-res medical images, like CT scans — and how they are addressing these obstacles. Jie also takes the time to emphasize the need for diversity when curating medical image datasets and the importance of mitigating bias during the data curation phase. To learn more about Segmed and how they are contributing to the development of medical AI, be sure to tune in today!</p><p><br></p><p><strong>Key Points:</strong></p><ul><li>A warm welcome to Jie Wu, co-founder of Segmed.</li><li>Insight into how Segmed is solving data issues to accelerate medical AI development.</li><li>Why solutions to these data issues are crucial for medical research.</li><li>Segmed’s focus on medical imaging data.</li><li>Their approach to different imaging modalities.</li><li>An overview of the key challenges in curating medical image datasets.</li><li>How Segmed determines the amount of data they will need.</li><li>Best practices for curating a training set of medical images.</li><li>Why collecting a diverse range of images is essential.</li><li>An overview of how the quality of labels is assessed by experts.</li><li>How imaging modality influences Segmed’s approach to creating datasets.</li><li>The variations in datasets across different imaging 
pathologies.</li><li>Special considerations that inform the validation set versus the training set.</li><li>How bias manifests in models trained on medical images.</li><li>Steps that can be taken to mitigate bias during the data curation phase.</li><li>How the need for diverse datasets has increased along with greater awareness of bias.</li><li>Jie’s thoughts on the future of foundation models in the medical AI space.</li></ul><p><br></p><p><strong>Quotes:</strong></p><p>“A high-resolution of CT can take up to several gigabytes of storage itself.” — Jie Wu</p><p><br></p><p>“I think the most important piece is actually to collect as diversely as possible. So I ask that given the budget limit or maybe time limit, the size of the data set will be limited but it should be at least representative of the target population and targeted practice.” — Jie Wu</p><p><br></p><p>“The best quality labels are curated by experts and it is curated by multiple experts.” — Jie Wu</p><p><br></p><p>“A 3D image stores much more information than the 2D images, so you need less data for that.” — Jie Wu</p><p><br></p><p>“The external validation datasets require much more carefully curated datasets and much higher quality labels, and also it needs to be representative of the population, of the institutions, and also geographical locations.” — Jie Wu</p><p><br></p><p>“We hope that we can enter into the development of AI and make these algorithms go to market faster and benefit more people.” — Jie Wu</p><p><br></p><p><strong>Links:</strong></p><p><a href="https://www.linkedin.com/in/jie-w-467600b3/">Jie Wu on LinkedIn</a></p><p><a href="https://www.segmed.ai/">Segmed</a></p><p><strong>Resources for Computer Vision Teams:</strong></p><p><a href="https://www.linkedin.com/in/hdcouture/">LinkedIn</a> – Connect with Heather.</p><p><a href="https://pixelscientia.com/newsletter/">Computer Vision Insights Newsletter</a> – A biweekly newsletter to help bring the latest machine learning and computer 
vision research to applications in people and planetary health.</p><p><a href="http://pixelscientia.com/services/strategy-session/">Computer Vision Strategy Session</a> – Not sure how to advance your computer vision project? Get unstuck with a clear set of next steps. Schedule a 1-hour strategy session now to advance your project.</p><p><a href="https://pixelscientia.com/services/advisory/">Computer Vision Advisory Services</a> – Monthly advisory services to help you strategically plan your CV/ML capabilities, reduce the trial-and-error of model development, and get to market faster.</p>]]>
      </itunes:summary>
      <itunes:keywords>machine learning, computer vision, healthcare, medical imaging</itunes:keywords>
      <itunes:explicit>No</itunes:explicit>
      <podcast:transcript url="https://share.transistor.fm/s/8679c608/transcript.txt" type="text/plain"/>
    </item>
    <item>
      <title>Conquering Cough with Joe Brew from Hyfe</title>
      <itunes:episode>46</itunes:episode>
      <podcast:episode>46</podcast:episode>
      <itunes:title>Conquering Cough with Joe Brew from Hyfe</itunes:title>
      <itunes:episodeType>full</itunes:episodeType>
      <guid isPermaLink="false">f79ebbdc-d04c-4bd4-988a-7197dbc4c79f</guid>
      <link>https://pixelscientia.com/podcast/conquering-cough-with-joe-brew-from-hyfe/</link>
      <description>
        <![CDATA[<p>These days, it seems that there are a lot of big problems in the world, especially in healthcare. Our guest today believes that there is massive value in tackling smaller problems, and, sometimes, the smaller problems are the most important to solve.</p><p>I welcome to the show today Joe Brew, Co-Founder and CEO of Hyfe, and he is here to talk about detecting and tracking coughing. We hear about what led to the founding of the company Hyfe and why they’ve narrowed their respiratory health innovations down to focus on cough. Joe talks about the role of machine learning, the process of gathering cough examples, and how they train their models. He touches on challenges they’ve faced, navigating model performance in varying environments, and the benefits of publishing their work. To hear more about why Joe believes now is the time to build this type of technology don’t miss out on this episode.</p><p><br></p><p><strong>Key Points:</strong></p><ul><li>How the movement of pathogens through our bodies and communities eventually led to the founding of Hyfe.</li><li>What Hyfe does with respiratory health and why it’s important in overall healthcare.</li><li>Why they’ve narrowed their focus down to the cough.</li><li>The role machine learning plays in their cough-count technology.</li><li>He explains more about acoustic epidemiology. </li><li>The process of gathering cough examples and annotating them to train their models.</li><li>We explore the challenges faced working with and training models on audio data.</li><li>Navigating model performance in varying environments: working well in the real world.</li><li>Joe shares thoughts on the benefits of publishing their work.</li><li>Why now was the time to build this type of technology.</li><li>How Joe and his team are measuring the impact of their technology </li><li>Joe offers advice to other leaders of AI startups. 
</li><li>We talk about the potential impact of Hyfe in three to five years.</li></ul><p><br></p><p><strong>Quotes:</strong></p><p>“I realized that there are so many global health problems that are addressable, at least partially by tech. I hesitate to say, solvable, but addressable.” — Joe Brew</p><p><br></p><p>“The really big problem that Hyfe is tackling is around respiratory health.” — Joe Brew</p><p><br></p><p>“It felt to us that cough is perhaps, the lowest-hanging fruit, the area where the additionality of tech is greatest, because it's so prevalent and because it's currently just the status quo is so poor.” — Joe Brew</p><p><br></p><p>“If you really want reliable medical grade annotations, you need reliable medical grade input. Garbage in, garbage out. That's why the only way to really do that is through partnerships with medical professionals.” — Joe Brew</p><p><br></p><p>“A method, that if I were to start another company or to do another project, I would absolutely repeat, is to go quickly to the market, start collecting data, real-world data really quickly, and build a feedback loop where you're constantly training, testing, validating on real-world data.” — Joe Brew</p><p><br></p><p>“Our aim is not just to get nice comments on the App Store or nice emails. It's to impact the lives of millions. Everybody who breathes has lungs and everybody with lungs coughs. We think cough tracking is for everybody.” — Joe Brew</p><p><br></p><p>“Don't be turned off by problems that appear simple. 
Sometimes the simplest problems are the ones that are the most important to solve.” — Joe Brew</p><p><br></p><p><strong>Links:</strong></p><p><a href="https://www.linkedin.com/in/joe-brew-1061a33a/">Joe Brew on LinkedIn</a></p><p><a href="https://twitter.com/joethebrew?lang=en">Joe Brew on Twitter</a></p><p><a href="https://www.hyfe.ai/">Hyfe AI</a></p><p><strong>Resources for Computer Vision Teams:</strong></p><p><a href="https://www.linkedin.com/in/hdcouture/">LinkedIn</a> – Connect with Heather.</p><p><a href="https://pixelscientia.com/newsletter/">Computer Vision Insights Newsletter</a> – A biweekly newsletter to help bring the latest machine learning and computer vision research to applications in people and planetary health.</p><p><a href="http://pixelscientia.com/services/strategy-session/">Computer Vision Strategy Session</a> – Not sure how to advance your computer vision project? Get unstuck with a clear set of next steps. Schedule a 1 hour strategy session now to advance your project.</p><p><a href="https://pixelscientia.com/services/advisory/">Computer Vision Advisory Services</a> – Monthly advisory services to help you strategically plan your CV/ML capabilities, reduce the trial-and-error of model development, and get to market faster.</p>]]>
      </description>
      <content:encoded>
        <![CDATA[<p>These days, it seems that there are a lot of big problems in the world, especially in healthcare. Our guest today believes that there is massive value in tackling smaller problems, and, sometimes, the smaller problems are the most important to solve.</p><p>I welcome to the show today Joe Brew, Co-Founder and CEO of Hyfe, and he is here to talk about detecting and tracking coughing. We hear about what led to the founding of the company Hyfe and why they’ve narrowed their respiratory health innovations down to focus on cough. Joe talks about the role of machine learning, the process of gathering cough examples, and how they train their models. He touches on challenges they’ve faced, navigating model performance in varying environments, and the benefits of publishing their work. To hear more about why Joe believes now is the time to build this type of technology, don’t miss out on this episode.</p><p><br></p><p><strong>Key Points:</strong></p><ul><li>How the movement of pathogens through our bodies and communities eventually led to the founding of Hyfe.</li><li>What Hyfe does with respiratory health and why it’s important in overall healthcare.</li><li>Why they’ve narrowed their focus down to the cough.</li><li>The role machine learning plays in their cough-count technology.</li><li>He explains more about acoustic epidemiology. </li><li>The process of gathering cough examples and annotating them to train their models.</li><li>We explore the challenges faced working with and training models on audio data.</li><li>Navigating model performance in varying environments: working well in the real world.</li><li>Joe shares thoughts on the benefits of publishing their work.</li><li>Why now was the time to build this type of technology.</li><li>How Joe and his team are measuring the impact of their technology.</li><li>Joe offers advice to other leaders of AI startups. 
</li><li>We talk about the potential impact of Hyfe in three to five years.</li></ul><p><br></p><p><strong>Quotes:</strong></p><p>“I realized that there are so many global health problems that are addressable, at least partially by tech. I hesitate to say, solvable, but addressable.” — Joe Brew</p><p><br></p><p>“The really big problem that Hyfe is tackling is around respiratory health.” — Joe Brew</p><p><br></p><p>“It felt to us that cough is perhaps, the lowest-hanging fruit, the area where the additionality of tech is greatest, because it's so prevalent and because it's currently just the status quo is so poor.” — Joe Brew</p><p><br></p><p>“If you really want reliable medical grade annotations, you need reliable medical grade input. Garbage in, garbage out. That's why the only way to really do that is through partnerships with medical professionals.” — Joe Brew</p><p><br></p><p>“A method, that if I were to start another company or to do another project, I would absolutely repeat, is to go quickly to the market, start collecting data, real-world data really quickly, and build a feedback loop where you're constantly training, testing, validating on real-world data.” — Joe Brew</p><p><br></p><p>“Our aim is not just to get nice comments on the App Store or nice emails. It's to impact the lives of millions. Everybody who breathes has lungs and everybody with lungs coughs. We think cough tracking is for everybody.” — Joe Brew</p><p><br></p><p>“Don't be turned off by problems that appear simple. 
Sometimes the simplest problems are the ones that are the most important to solve.” — Joe Brew</p><p><br></p><p><strong>Links:</strong></p><p><a href="https://www.linkedin.com/in/joe-brew-1061a33a/">Joe Brew on LinkedIn</a></p><p><a href="https://twitter.com/joethebrew?lang=en">Joe Brew on Twitter</a></p><p><a href="https://www.hyfe.ai/">Hyfe AI</a></p><p><strong>Resources for Computer Vision Teams:</strong></p><p><a href="https://www.linkedin.com/in/hdcouture/">LinkedIn</a> – Connect with Heather.</p><p><a href="https://pixelscientia.com/newsletter/">Computer Vision Insights Newsletter</a> – A biweekly newsletter to help bring the latest machine learning and computer vision research to applications in people and planetary health.</p><p><a href="http://pixelscientia.com/services/strategy-session/">Computer Vision Strategy Session</a> – Not sure how to advance your computer vision project? Get unstuck with a clear set of next steps. Schedule a 1 hour strategy session now to advance your project.</p><p><a href="https://pixelscientia.com/services/advisory/">Computer Vision Advisory Services</a> – Monthly advisory services to help you strategically plan your CV/ML capabilities, reduce the trial-and-error of model development, and get to market faster.</p>]]>
      </content:encoded>
      <pubDate>Mon, 21 Aug 2023 06:00:00 -0400</pubDate>
      <author>Heather D. Couture</author>
      <enclosure url="https://2.gum.fm/op3.dev/e/pdcn.co/e/pdst.fm/e/dts.podtrac.com/redirect.mp3/media.transistor.fm/9700c0fe/984ac487.mp3" length="46912733" type="audio/mpeg"/>
      <itunes:author>Heather D. Couture</itunes:author>
      <itunes:image href="https://img.transistor.fm/hLEpUlnq__I1dLcyUxQfONFOhRZ9R8TPCme9IF08xtY/rs:fill:0:0:1/w:1400/h:1400/q:60/mb:500000/aHR0cHM6Ly9pbWct/dXBsb2FkLXByb2R1/Y3Rpb24udHJhbnNp/c3Rvci5mbS9lcGlz/b2RlLzE0MDY3MDYv/MTY4ODQzNzY0Ni1h/cnR3b3JrLmpwZw.jpg"/>
      <itunes:duration>1950</itunes:duration>
      <itunes:summary>
        <![CDATA[<p>These days, it seems that there are a lot of big problems in the world, especially in healthcare. Our guest today believes that there is massive value in tackling smaller problems, and, sometimes, the smaller problems are the most important to solve.</p><p>I welcome to the show today Joe Brew, Co-Founder and CEO of Hyfe, and he is here to talk about detecting and tracking coughing. We hear about what led to the founding of the company Hyfe and why they’ve narrowed their respiratory health innovations down to focus on cough. Joe talks about the role of machine learning, the process of gathering cough examples, and how they train their models. He touches on challenges they’ve faced, navigating model performance in varying environments, and the benefits of publishing their work. To hear more about why Joe believes now is the time to build this type of technology, don’t miss out on this episode.</p><p><br></p><p><strong>Key Points:</strong></p><ul><li>How the movement of pathogens through our bodies and communities eventually led to the founding of Hyfe.</li><li>What Hyfe does with respiratory health and why it’s important in overall healthcare.</li><li>Why they’ve narrowed their focus down to the cough.</li><li>The role machine learning plays in their cough-count technology.</li><li>He explains more about acoustic epidemiology. </li><li>The process of gathering cough examples and annotating them to train their models.</li><li>We explore the challenges faced working with and training models on audio data.</li><li>Navigating model performance in varying environments: working well in the real world.</li><li>Joe shares thoughts on the benefits of publishing their work.</li><li>Why now was the time to build this type of technology.</li><li>How Joe and his team are measuring the impact of their technology.</li><li>Joe offers advice to other leaders of AI startups. 
</li><li>We talk about the potential impact of Hyfe in three to five years.</li></ul><p><br></p><p><strong>Quotes:</strong></p><p>“I realized that there are so many global health problems that are addressable, at least partially by tech. I hesitate to say, solvable, but addressable.” — Joe Brew</p><p><br></p><p>“The really big problem that Hyfe is tackling is around respiratory health.” — Joe Brew</p><p><br></p><p>“It felt to us that cough is perhaps, the lowest-hanging fruit, the area where the additionality of tech is greatest, because it's so prevalent and because it's currently just the status quo is so poor.” — Joe Brew</p><p><br></p><p>“If you really want reliable medical grade annotations, you need reliable medical grade input. Garbage in, garbage out. That's why the only way to really do that is through partnerships with medical professionals.” — Joe Brew</p><p><br></p><p>“A method, that if I were to start another company or to do another project, I would absolutely repeat, is to go quickly to the market, start collecting data, real-world data really quickly, and build a feedback loop where you're constantly training, testing, validating on real-world data.” — Joe Brew</p><p><br></p><p>“Our aim is not just to get nice comments on the App Store or nice emails. It's to impact the lives of millions. Everybody who breathes has lungs and everybody with lungs coughs. We think cough tracking is for everybody.” — Joe Brew</p><p><br></p><p>“Don't be turned off by problems that appear simple. 
Sometimes the simplest problems are the ones that are the most important to solve.” — Joe Brew</p><p><br></p><p><strong>Links:</strong></p><p><a href="https://www.linkedin.com/in/joe-brew-1061a33a/">Joe Brew on LinkedIn</a></p><p><a href="https://twitter.com/joethebrew?lang=en">Joe Brew on Twitter</a></p><p><a href="https://www.hyfe.ai/">Hyfe AI</a></p><p><strong>Resources for Computer Vision Teams:</strong></p><p><a href="https://www.linkedin.com/in/hdcouture/">LinkedIn</a> – Connect with Heather.</p><p><a href="https://pixelscientia.com/newsletter/">Computer Vision Insights Newsletter</a> – A biweekly newsletter to help bring the latest machine learning and computer vision research to applications in people and planetary health.</p><p><a href="http://pixelscientia.com/services/strategy-session/">Computer Vision Strategy Session</a> – Not sure how to advance your computer vision project? Get unstuck with a clear set of next steps. Schedule a 1 hour strategy session now to advance your project.</p><p><a href="https://pixelscientia.com/services/advisory/">Computer Vision Advisory Services</a> – Monthly advisory services to help you strategically plan your CV/ML capabilities, reduce the trial-and-error of model development, and get to market faster.</p>]]>
      </itunes:summary>
      <itunes:keywords>machine learning, ai, cough, medical</itunes:keywords>
      <itunes:explicit>No</itunes:explicit>
      <podcast:transcript url="https://share.transistor.fm/s/9700c0fe/transcript.txt" type="text/plain"/>
    </item>
    <item>
      <title>Global Parametric Flood Coverage with Subit Chakrabarti from Floodbase</title>
      <itunes:episode>45</itunes:episode>
      <podcast:episode>45</podcast:episode>
      <itunes:title>Global Parametric Flood Coverage with Subit Chakrabarti from Floodbase</itunes:title>
      <itunes:episodeType>full</itunes:episodeType>
      <guid isPermaLink="false">ca3ec7ca-6202-46d1-933e-fcb7dfee796f</guid>
      <link>https://pixelscientia.com/podcast/global-parametric-flood-coverage-with-subit-chakrabarti-from-floodbase/</link>
      <description>
        <![CDATA[<p>The impact of AI knows no bounds. Today, I am joined by Subit Chakrabarti, Vice President of Technology at Floodbase, a mission-driven, machine-learning-powered company specializing in flood monitoring and insurance. Having grown up in Eastern India, he knows the importance of adapting to global flood risk first-hand.</p><p>In this episode, Subit shares insights on how Floodbase utilizes advanced AI and diverse satellite imagery to support the design of parametric flood insurance solutions. We discover how machine learning plays a crucial role in analyzing vast datasets and bridging the insurance gap for regions vulnerable to flooding. Join us as we explore the transformative potential of Floodbase's technology and its vision for a more secure and equitable future, in the context of global warming and the associated global flood risk.</p><p><br></p><p><strong>Key Points:</strong></p><ul><li>Introducing Subit Chakrabarti, Vice President of Technology at Floodbase.</li><li>Subit's background: what led him to Floodbase.</li><li>Insight into Floodbase and its focus on parametric flood insurance and disaster response.</li><li>Subit explains parametric insurance.</li><li>How Floodbase uses machine learning to design the index for parametric insurance.</li><li>Their use of satellite imagery and other geospatial datasets in setting up their ML models.</li><li>The challenges of working with such diverse data sets.</li><li>What made it possible to build Floodbase's technology (spoiler alert: advanced AI).</li><li>How Floodbase measures its impact.</li><li>Subit’s advice for AI-powered startup leaders: address bias and build a skilled AI team.</li><li>Floodbase’s three to five-year plan: make insurance more accessible. 
</li></ul><p><br></p><p><strong>Quotes:</strong></p><p>“Adapting to global flood risk is something that is near and dear to my heart, having grown up in India and having seen a lot of damage from floods in Eastern India where I used to live.” — Subit Chakrabarti</p><p><br></p><p>“Parametric insurance pays out when a pre-agreed weather condition is made separate from the physical damage.” — Subit Chakrabarti</p><p><br></p><p>“What we use machine learning for is to design [the] index that the parametric insurance can be based on, and that is our proprietary AI technology.” — Subit Chakrabarti</p><p><br></p><p>“One of the most important challenges with satellite imagery is that satellite imagery represents the condition of a place at a certain point in time and it’s not the continuous movement of what that flood looks like at that place.” — Subit Chakrabarti</p><p><br></p><p>“Our policy at Floodbase is that we add more data to remove bias from the process.” — Subit Chakrabarti</p><p><br></p><p>“The biggest thing that we can measure is the flood protection gap. So like I said, 83% of losses are uninsured and we can measure that.” — Subit Chakrabarti</p><p><br></p><p><strong>Links:</strong></p><p><a href="https://www.linkedin.com/in/subit-chakrabarti-74045456/">Subit Chakrabarti on LinkedIn</a></p><p><a href="https://twitter.com/abitmore">Subit Chakrabarti on Twitter</a></p><p><a href="https://www.floodbase.com/">Floodbase</a></p><p><strong>Resources for Computer Vision Teams:</strong></p><p><a href="https://www.linkedin.com/in/hdcouture/">LinkedIn</a> – Connect with Heather.</p><p><a href="https://pixelscientia.com/newsletter/">Computer Vision Insights Newsletter</a> – A biweekly newsletter to help bring the latest machine learning and computer vision research to applications in people and planetary health.</p><p><a href="http://pixelscientia.com/services/strategy-session/">Computer Vision Strategy Session</a> – Not sure how to advance your computer vision project? 
Get unstuck with a clear set of next steps. Schedule a 1 hour strategy session now to advance your project.</p><p><a href="https://pixelscientia.com/services/advisory/">Computer Vision Advisory Services</a> – Monthly advisory services to help you strategically plan your CV/ML capabilities, reduce the trial-and-error of model development, and get to market faster.</p>]]>
      </description>
      <content:encoded>
        <![CDATA[<p>The impact of AI knows no bounds. Today, I am joined by Subit Chakrabarti, Vice President of Technology at Floodbase, a mission-driven, machine-learning-powered company specializing in flood monitoring and insurance. Having grown up in Eastern India, he knows the importance of adapting to global flood risk first-hand.</p><p>In this episode, Subit shares insights on how Floodbase utilizes advanced AI and diverse satellite imagery to support the design of parametric flood insurance solutions. We discover how machine learning plays a crucial role in analyzing vast datasets and bridging the insurance gap for regions vulnerable to flooding. Join us as we explore the transformative potential of Floodbase's technology and its vision for a more secure and equitable future, in the context of global warming and the associated global flood risk.</p><p><br></p><p><strong>Key Points:</strong></p><ul><li>Introducing Subit Chakrabarti, Vice President of Technology at Floodbase.</li><li>Subit's background: what led him to Floodbase.</li><li>Insight into Floodbase and its focus on parametric flood insurance and disaster response.</li><li>Subit explains parametric insurance.</li><li>How Floodbase uses machine learning to design the index for parametric insurance.</li><li>Their use of satellite imagery and other geospatial datasets in setting up their ML models.</li><li>The challenges of working with such diverse data sets.</li><li>What made it possible to build Floodbase's technology (spoiler alert: advanced AI).</li><li>How Floodbase measures its impact.</li><li>Subit’s advice for AI-powered startup leaders: address bias and build a skilled AI team.</li><li>Floodbase’s three to five-year plan: make insurance more accessible. 
</li></ul><p><br></p><p><strong>Quotes:</strong></p><p>“Adapting to global flood risk is something that is near and dear to my heart, having grown up in India and having seen a lot of damage from floods in Eastern India where I used to live.” — Subit Chakrabarti</p><p><br></p><p>“Parametric insurance pays out when a pre-agreed weather condition is made separate from the physical damage.” — Subit Chakrabarti</p><p><br></p><p>“What we use machine learning for is to design [the] index that the parametric insurance can be based on, and that is our proprietary AI technology.” — Subit Chakrabarti</p><p><br></p><p>“One of the most important challenges with satellite imagery is that satellite imagery represents the condition of a place at a certain point in time and it’s not the continuous movement of what that flood looks like at that place.” — Subit Chakrabarti</p><p><br></p><p>“Our policy at Floodbase is that we add more data to remove bias from the process.” — Subit Chakrabarti</p><p><br></p><p>“The biggest thing that we can measure is the flood protection gap. So like I said, 83% of losses are uninsured and we can measure that.” — Subit Chakrabarti</p><p><br></p><p><strong>Links:</strong></p><p><a href="https://www.linkedin.com/in/subit-chakrabarti-74045456/">Subit Chakrabarti on LinkedIn</a></p><p><a href="https://twitter.com/abitmore">Subit Chakrabarti on Twitter</a></p><p><a href="https://www.floodbase.com/">Floodbase</a></p><p><strong>Resources for Computer Vision Teams:</strong></p><p><a href="https://www.linkedin.com/in/hdcouture/">LinkedIn</a> – Connect with Heather.</p><p><a href="https://pixelscientia.com/newsletter/">Computer Vision Insights Newsletter</a> – A biweekly newsletter to help bring the latest machine learning and computer vision research to applications in people and planetary health.</p><p><a href="http://pixelscientia.com/services/strategy-session/">Computer Vision Strategy Session</a> – Not sure how to advance your computer vision project? 
Get unstuck with a clear set of next steps. Schedule a 1 hour strategy session now to advance your project.</p><p><a href="https://pixelscientia.com/services/advisory/">Computer Vision Advisory Services</a> – Monthly advisory services to help you strategically plan your CV/ML capabilities, reduce the trial-and-error of model development, and get to market faster.</p>]]>
      </content:encoded>
      <pubDate>Mon, 14 Aug 2023 06:00:00 -0400</pubDate>
      <author>Heather D. Couture</author>
      <enclosure url="https://2.gum.fm/op3.dev/e/pdcn.co/e/pdst.fm/e/dts.podtrac.com/redirect.mp3/media.transistor.fm/c82e2df2/9195066a.mp3" length="37096064" type="audio/mpeg"/>
      <itunes:author>Heather D. Couture</itunes:author>
      <itunes:image href="https://img.transistor.fm/26YyHjL-68nUtXFvzaP7sY6lYXo_AXvdQkemdcxnKH8/rs:fill:0:0:1/w:1400/h:1400/q:60/mb:500000/aHR0cHM6Ly9pbWct/dXBsb2FkLXByb2R1/Y3Rpb24udHJhbnNp/c3Rvci5mbS9lcGlz/b2RlLzE0MzUyNjQv/MTY5MDU1NDQ0OC1h/cnR3b3JrLmpwZw.jpg"/>
      <itunes:duration>1539</itunes:duration>
      <itunes:summary>
        <![CDATA[<p>The impact of AI knows no bounds. Today, I am joined by Subit Chakrabarti, Vice President of Technology at Floodbase, a mission-driven, machine-learning-powered company specializing in flood monitoring and insurance. Having grown up in Eastern India, he knows the importance of adapting to global flood risk first-hand.</p><p>In this episode, Subit shares insights on how Floodbase utilizes advanced AI and diverse satellite imagery to support the design of parametric flood insurance solutions. We discover how machine learning plays a crucial role in analyzing vast datasets and bridging the insurance gap for regions vulnerable to flooding. Join us as we explore the transformative potential of Floodbase's technology and its vision for a more secure and equitable future, in the context of global warming and the associated global flood risk.</p><p><br></p><p><strong>Key Points:</strong></p><ul><li>Introducing Subit Chakrabarti, Vice President of Technology at Floodbase.</li><li>Subit's background: what led him to Floodbase.</li><li>Insight into Floodbase and its focus on parametric flood insurance and disaster response.</li><li>Subit explains parametric insurance.</li><li>How Floodbase uses machine learning to design the index for parametric insurance.</li><li>Their use of satellite imagery and other geospatial datasets in setting up their ML models.</li><li>The challenges of working with such diverse data sets.</li><li>What made it possible to build Floodbase's technology (spoiler alert: advanced AI).</li><li>How Floodbase measures its impact.</li><li>Subit’s advice for AI-powered startup leaders: address bias and build a skilled AI team.</li><li>Floodbase’s three to five-year plan: make insurance more accessible. 
</li></ul><p><br></p><p><strong>Quotes:</strong></p><p>“Adapting to global flood risk is something that is near and dear to my heart, having grown up in India and having seen a lot of damage from floods in Eastern India where I used to live.” — Subit Chakrabarti</p><p><br></p><p>“Parametric insurance pays out when a pre-agreed weather condition is made separate from the physical damage.” — Subit Chakrabarti</p><p><br></p><p>“What we use machine learning for is to design [the] index that the parametric insurance can be based on, and that is our proprietary AI technology.” — Subit Chakrabarti</p><p><br></p><p>“One of the most important challenges with satellite imagery is that satellite imagery represents the condition of a place at a certain point in time and it’s not the continuous movement of what that flood looks like at that place.” — Subit Chakrabarti</p><p><br></p><p>“Our policy at Floodbase is that we add more data to remove bias from the process.” — Subit Chakrabarti</p><p><br></p><p>“The biggest thing that we can measure is the flood protection gap. So like I said, 83% of losses are uninsured and we can measure that.” — Subit Chakrabarti</p><p><br></p><p><strong>Links:</strong></p><p><a href="https://www.linkedin.com/in/subit-chakrabarti-74045456/">Subit Chakrabarti on LinkedIn</a></p><p><a href="https://twitter.com/abitmore">Subit Chakrabarti on Twitter</a></p><p><a href="https://www.floodbase.com/">Floodbase</a></p><p><strong>Resources for Computer Vision Teams:</strong></p><p><a href="https://www.linkedin.com/in/hdcouture/">LinkedIn</a> – Connect with Heather.</p><p><a href="https://pixelscientia.com/newsletter/">Computer Vision Insights Newsletter</a> – A biweekly newsletter to help bring the latest machine learning and computer vision research to applications in people and planetary health.</p><p><a href="http://pixelscientia.com/services/strategy-session/">Computer Vision Strategy Session</a> – Not sure how to advance your computer vision project? 
Get unstuck with a clear set of next steps. Schedule a 1 hour strategy session now to advance your project.</p><p><a href="https://pixelscientia.com/services/advisory/">Computer Vision Advisory Services</a> – Monthly advisory services to help you strategically plan your CV/ML capabilities, reduce the trial-and-error of model development, and get to market faster.</p>]]>
      </itunes:summary>
      <itunes:keywords>machine learning, deep learning, flood, insurance, remote sensing, geospatial</itunes:keywords>
      <itunes:explicit>No</itunes:explicit>
      <podcast:transcript url="https://share.transistor.fm/s/c82e2df2/transcript.txt" type="text/plain"/>
    </item>
    <item>
      <title>Detecting Gastrointestinal Cancers Earlier with Marcel Gehrung from Cyted</title>
      <itunes:episode>44</itunes:episode>
      <podcast:episode>44</podcast:episode>
      <itunes:title>Detecting Gastrointestinal Cancers Earlier with Marcel Gehrung from Cyted</itunes:title>
      <itunes:episodeType>full</itunes:episodeType>
      <guid isPermaLink="false">f0973197-7030-4af9-a891-0425412bea90</guid>
      <link>https://pixelscientia.com/podcast/detecting-gastrointestinal-cancers-earlier-with-marcel-gehrung-from-cyted/</link>
      <description>
        <![CDATA[<p>The role of AI in cancer detection grows more significant with each passing week. During this conversation, I welcome Marcel Gehrung, CEO and Co-Founder of Cyted, to discuss detecting gastrointestinal cancer. You’ll learn how Cyted leverages machine learning to diagnose Barrett’s Esophagus in upper GI samples. Marcel reveals some of the challenges he has faced at Cyted related to the limited autonomy an algorithm can realistically provide, and annotating data for training and validation. Hear how the company is responding to changes in AI, and why hiring for technical roles at Cyted has not been difficult, due to their location. You’ll hear Marcel’s perspective on hiring specialist generalists and some of his advice for leaders at AI-powered startups.</p><p><br></p><p><strong>Key Points:</strong></p><ul><li>Introducing founder and CEO at Cyted, Marcel Gehrung. </li><li>His path to focusing on gastrointestinal cancer detection.</li><li>How the technology at Cyted works to diagnose Barrett’s Esophagus. </li><li>What Barrett’s is and who is most susceptible to it. </li><li>The role of machine learning in detecting cancer in upper GI samples.</li><li>Navigating the challenge of how much autonomy an algorithm can provide.</li><li>Annotating data for training and validation.</li><li>How Cyted is responding to changes in AI.</li><li>Hiring for technical roles at Cyted.</li><li>Onboarding challenges due to the verticality of technology Cyted works with. </li><li>Why Marcel advocates for hiring specialized generalists.</li><li>Marcel’s advice for leaders of AI-powered startups. </li><li>Where he sees the impact of Cyted in three to five years.</li></ul><p><br></p><p><strong>Quotes:</strong></p><p>“We’re essentially leveraging the best of both worlds. 
We’re working with cytoscreeners, which we also have on our staff to generate the initial annotations, and then we have someone who looks at it and then reclassifies if necessary.” — Marcel Gehrung</p><p><br></p><p>“The more ability the candidates have to horizontally integrate different types of knowledge from across the company or across the technology of the sector, the better.” — Marcel Gehrung</p><p><strong><br></strong>“Getting carried away just happens so easily, particularly when we follow the various news outlets in the world that overwhelm us with new exciting ideas and functions of that technology.” — Marcel Gehrung</p><p><br></p><p><strong>Links:</strong></p><p><a href="https://www.linkedin.com/in/marcel-gehrung/">Marcel Gehrung on LinkedIn</a><br><a href="https://twitter.com/marcelgehrung">Marcel Gehrung on Twitter</a><br><a href="https://cyted.ai/">Cyted</a></p><p><strong>Resources for Computer Vision Teams:</strong></p><p><a href="https://www.linkedin.com/in/hdcouture/">LinkedIn</a> – Connect with Heather.</p><p><a href="https://pixelscientia.com/newsletter/">Computer Vision Insights Newsletter</a> – A biweekly newsletter to help bring the latest machine learning and computer vision research to applications in people and planetary health.</p><p><a href="http://pixelscientia.com/services/strategy-session/">Computer Vision Strategy Session</a> – Not sure how to advance your computer vision project? Get unstuck with a clear set of next steps. Schedule a 1 hour strategy session now to advance your project.</p><p><a href="https://pixelscientia.com/services/advisory/">Computer Vision Advisory Services</a> – Monthly advisory services to help you strategically plan your CV/ML capabilities, reduce the trial-and-error of model development, and get to market faster.</p>]]>
      </description>
      <content:encoded>
        <![CDATA[<p>The role of AI in cancer detection grows more significant with each passing week. During this conversation, I welcome Marcel Gehrung, CEO and Co-Founder of Cyted, to discuss detecting gastrointestinal cancer. You’ll learn how Cyted leverages machine learning to diagnose Barrett’s Esophagus in upper GI samples. Marcel reveals some of the challenges he has faced at Cyted related to the limited autonomy an algorithm can realistically provide, and annotating data for training and validation. Hear how the company is responding to changes in AI, and why hiring for technical roles at Cyted has not been difficult, due to their location. You’ll hear Marcel’s perspective on hiring specialist generalists and some of his advice for leaders at AI-powered startups.</p><p><br></p><p><strong>Key Points:</strong></p><ul><li>Introducing founder and CEO at Cyted, Marcel Gehrung. </li><li>His path to focusing on gastrointestinal cancer detection.</li><li>How the technology at Cyted works to diagnose Barrett’s Esophagus. </li><li>What Barrett’s is and who is most susceptible to it. </li><li>The role of machine learning in detecting cancer in upper GI samples.</li><li>Navigating the challenge of how much autonomy an algorithm can provide.</li><li>Annotating data for training and validation.</li><li>How Cyted is responding to changes in AI.</li><li>Hiring for technical roles at Cyted.</li><li>Onboarding challenges due to the verticality of technology Cyted works with. </li><li>Why Marcel advocates for hiring specialized generalists.</li><li>Marcel’s advice for leaders of AI-powered startups. </li><li>Where he sees the impact of Cyted in three to five years.</li></ul><p><br></p><p><strong>Quotes:</strong></p><p>“We’re essentially leveraging the best of both worlds. 
We’re working with cytoscreeners, which we also have on our staff to generate the initial annotations, and then we have someone who looks at it and then reclassifies if necessary.” — Marcel Gehrung</p><p><br></p><p>“The more ability the candidates have to horizontally integrate different types of knowledge from across the company or across the technology of the sector, the better.” — Marcel Gehrung</p><p><br></p><p>“Getting carried away just happens so easily, particularly when we follow the various news outlets in the world that overwhelm us with new exciting ideas and functions of that technology.” — Marcel Gehrung</p><p><br></p><p><strong>Links:</strong></p><p><a href="https://www.linkedin.com/in/marcel-gehrung/">Marcel Gehrung on LinkedIn</a><br><a href="https://twitter.com/marcelgehrung">Marcel Gehrung on Twitter</a><br><a href="https://cyted.ai/">Cyted</a></p><p><strong>Resources for Computer Vision Teams:</strong></p><p><a href="https://www.linkedin.com/in/hdcouture/">LinkedIn</a> – Connect with Heather.</p><p><a href="https://pixelscientia.com/newsletter/">Computer Vision Insights Newsletter</a> – A biweekly newsletter to help bring the latest machine learning and computer vision research to applications in people and planetary health.</p><p><a href="http://pixelscientia.com/services/strategy-session/">Computer Vision Strategy Session</a> – Not sure how to advance your computer vision project? Get unstuck with a clear set of next steps. Schedule a 1 hour strategy session now to advance your project.</p><p><a href="https://pixelscientia.com/services/advisory/">Computer Vision Advisory Services</a> – Monthly advisory services to help you strategically plan your CV/ML capabilities, reduce the trial-and-error of model development, and get to market faster.</p>]]>
      </content:encoded>
      <pubDate>Mon, 07 Aug 2023 06:00:00 -0400</pubDate>
      <author>Heather D. Couture</author>
      <enclosure url="https://2.gum.fm/op3.dev/e/pdcn.co/e/pdst.fm/e/dts.podtrac.com/redirect.mp3/media.transistor.fm/bde37774/b6dcc33f.mp3" length="39602052" type="audio/mpeg"/>
      <itunes:author>Heather D. Couture</itunes:author>
      <itunes:image href="https://img.transistor.fm/Xw9u73BHCLiWg8O1h3MdcimJ28dEDi2cTV5bGvX9Ty8/rs:fill:0:0:1/w:1400/h:1400/q:60/mb:500000/aHR0cHM6Ly9pbWct/dXBsb2FkLXByb2R1/Y3Rpb24udHJhbnNp/c3Rvci5mbS9lcGlz/b2RlLzEzODU0MTIv/MTY4Njg1MzY0OC1h/cnR3b3JrLmpwZw.jpg"/>
      <itunes:duration>1645</itunes:duration>
      <itunes:summary>
        <![CDATA[<p>The role of AI in cancer detection grows more significant with each passing week. During this conversation, I welcome Marcel Gehrung, CEO and Co-Founder of Cyted, to discuss detecting gastrointestinal cancer. You’ll learn how Cyted leverages machine learning to diagnose Barrett’s Esophagus in upper GI samples. Marcel reveals some of the challenges he has faced at Cyted related to the limited autonomy an algorithm can realistically provide, and annotating data for training and validation. Hear how the company is responding to changes in AI, and why hiring for technical roles at Cyted has not been difficult, due to their location. You’ll hear Marcel’s perspective on hiring specialist generalists and some of his advice for leaders at AI-powered startups.</p><p><br></p><p><strong>Key Points:</strong></p><ul><li>Introducing founder and CEO at Cyted, Marcel Gehrung. </li><li>His path to focusing on gastrointestinal cancer detection.</li><li>How the technology at Cyted works to diagnose Barrett’s Esophagus. </li><li>What Barrett’s is and who is most susceptible to it. </li><li>The role of machine learning in detecting cancer in upper GI samples.</li><li>Navigating the challenge of how much autonomy an algorithm can provide.</li><li>Annotating data for training and validation.</li><li>How Cyted is responding to changes in AI.</li><li>Hiring for technical roles at Cyted.</li><li>Onboarding challenges due to the verticality of technology Cyted works with. </li><li>Why Marcel advocates for hiring specialized generalists.</li><li>Marcel’s advice for leaders of AI-powered startups. </li><li>Where he sees the impact of Cyted in three to five years.</li></ul><p><br></p><p><strong>Quotes:</strong></p><p>“We’re essentially leveraging the best of both worlds. 
We’re working with cytoscreeners, which we also have on our staff to generate the initial annotations, and then we have someone who looks at it and then reclassifies if necessary.” — Marcel Gehrung</p><p><br></p><p>“The more ability the candidates have to horizontally integrate different types of knowledge from across the company or across the technology of the sector, the better.” — Marcel Gehrung</p><p><br></p><p>“Getting carried away just happens so easily, particularly when we follow the various news outlets in the world that overwhelm us with new exciting ideas and functions of that technology.” — Marcel Gehrung</p><p><br></p><p><strong>Links:</strong></p><p><a href="https://www.linkedin.com/in/marcel-gehrung/">Marcel Gehrung on LinkedIn</a><br><a href="https://twitter.com/marcelgehrung">Marcel Gehrung on Twitter</a><br><a href="https://cyted.ai/">Cyted</a></p><p><strong>Resources for Computer Vision Teams:</strong></p><p><a href="https://www.linkedin.com/in/hdcouture/">LinkedIn</a> – Connect with Heather.</p><p><a href="https://pixelscientia.com/newsletter/">Computer Vision Insights Newsletter</a> – A biweekly newsletter to help bring the latest machine learning and computer vision research to applications in people and planetary health.</p><p><a href="http://pixelscientia.com/services/strategy-session/">Computer Vision Strategy Session</a> – Not sure how to advance your computer vision project? Get unstuck with a clear set of next steps. Schedule a 1 hour strategy session now to advance your project.</p><p><a href="https://pixelscientia.com/services/advisory/">Computer Vision Advisory Services</a> – Monthly advisory services to help you strategically plan your CV/ML capabilities, reduce the trial-and-error of model development, and get to market faster.</p>]]>
      </itunes:summary>
      <itunes:keywords>machine learning, computer vision, healthcare, medical imaging, cancer</itunes:keywords>
      <itunes:explicit>No</itunes:explicit>
      <podcast:transcript url="https://share.transistor.fm/s/bde37774/transcript.txt" type="text/plain"/>
    </item>
    <item>
      <title>Greener Home Upgrades with Ankur Garg from BlocPower</title>
      <itunes:episode>43</itunes:episode>
      <podcast:episode>43</podcast:episode>
      <itunes:title>Greener Home Upgrades with Ankur Garg from BlocPower</itunes:title>
      <itunes:episodeType>full</itunes:episodeType>
      <guid isPermaLink="false">648fc8e2-4b16-4421-b9d8-49707146b9da</guid>
      <link>https://pixelscientia.com/podcast/greener-home-upgrades-with-ankur-garg-from-blocpower/</link>
      <description>
        <![CDATA[<p>Climate change is one of the most pressing issues of our time, and today’s guest, Ankur Garg, and his team at BlocPower are using machine learning technology to mitigate it. BlocPower is a climate technology company that is focused on making buildings in low and middle-income areas more environmentally friendly. Their area of expertise lies in developing products and services to lower or eliminate the barriers that prevent access to energy efficiency and electrification retrofits. And this all starts with gathering, checking, annotating, and understanding enormous amounts of data (BlocPower currently has over 40 terabytes of data in its data lake!)</p><p>In this episode, Ankur talks about the innovative ways in which BlocPower deals with its data, the challenges that they face when it comes to the size and scope of its datasets, why machine learning technology is central to the work they do, and how they measure the impact of their technology.</p><p><br></p><p><strong>Key Points:</strong></p><ul><li>Ankur’s career journey prior to joining BlocPower. </li><li>Why Ankur decided to join BlocPower.  </li><li>The inspiring work that BlocPower is doing to contribute to solving the problem of climate change.</li><li>The central role that machine learning plays in BlocPower’s approach.</li><li>Ankur gives examples of some of the different types of machine learning models that BlocPower uses.</li><li>The size of BlocPower’s data lake and the types of data stored within it.</li><li>BlocPower’s innovative approach to annotating data. 
</li><li>The importance of high-quality training data sets in the machine learning space.</li><li>Challenges that Ankur and his team face when training machine learning models on their core dataset.</li><li>Technological advancements that have allowed BlocPower to achieve what it has.</li><li>How BlocPower measures the impact of its technology.</li><li>What Ankur believes the future holds for BlocPower. </li></ul><p><br></p><p><strong>Quotes:</strong></p><p>“Climate change is one of the primary problems of our generation, and BlocPower is making a huge dent in solving that.” — Ankur Garg</p><p><br></p><p>“Machine learning really excels at ingesting huge volumes of data and to be able to infer key relationships between these data points to come up with an optimal output or a solution.” — Ankur Garg</p><p><br></p><p>“Labeling the data and annotating is extremely critical. If your training data set is not of a good quality, no matter what algorithm you use, it won’t really perform well.” — Ankur Garg</p><p><br></p><p>“You need a lot of high-quality data for machine learning and artificial intelligence to be productive.” — Ankur Garg</p><p><br></p><p><strong>Links:</strong></p><p><a href="https://www.linkedin.com/in/ankurgargyale/">Ankur Garg on LinkedIn</a></p><p><a href="https://www.blocpower.io/">BlocPower</a></p><p><a href="mailto:support@blocpower.io">BlocPower Email Address</a></p><p><strong>Resources for Computer Vision Teams:</strong></p><p><a href="https://www.linkedin.com/in/hdcouture/">LinkedIn</a> – Connect with Heather.</p><p><a href="https://pixelscientia.com/newsletter/">Computer Vision Insights Newsletter</a> – A biweekly newsletter to help bring the latest machine learning and computer vision research to applications in people and planetary health.</p><p><a href="http://pixelscientia.com/services/strategy-session/">Computer Vision Strategy Session</a> – Not sure how to advance your computer vision project? Get unstuck with a clear set of next steps. 
Schedule a 1 hour strategy session now to advance your project.</p><p><a href="https://pixelscientia.com/services/advisory/">Computer Vision Advisory Services</a> – Monthly advisory services to help you strategically plan your CV/ML capabilities, reduce the trial-and-error of model development, and get to market faster.</p>]]>
      </description>
      <content:encoded>
        <![CDATA[<p>Climate change is one of the most pressing issues of our time, and today’s guest, Ankur Garg, and his team at BlocPower are using machine learning technology to mitigate it. BlocPower is a climate technology company that is focused on making buildings in low and middle-income areas more environmentally friendly. Their area of expertise lies in developing products and services to lower or eliminate the barriers that prevent access to energy efficiency and electrification retrofits. And this all starts with gathering, checking, annotating, and understanding enormous amounts of data (BlocPower currently has over 40 terabytes of data in its data lake!)</p><p>In this episode, Ankur talks about the innovative ways in which BlocPower deals with its data, the challenges that they face when it comes to the size and scope of its datasets, why machine learning technology is central to the work they do, and how they measure the impact of their technology.</p><p><br></p><p><strong>Key Points:</strong></p><ul><li>Ankur’s career journey prior to joining BlocPower. </li><li>Why Ankur decided to join BlocPower.  </li><li>The inspiring work that BlocPower is doing to contribute to solving the problem of climate change.</li><li>The central role that machine learning plays in BlocPower’s approach.</li><li>Ankur gives examples of some of the different types of machine learning models that BlocPower uses.</li><li>The size of BlocPower’s data lake and the types of data stored within it.</li><li>BlocPower’s innovative approach to annotating data. 
</li><li>The importance of high-quality training data sets in the machine learning space.</li><li>Challenges that Ankur and his team face when training machine learning models on their core dataset.</li><li>Technological advancements that have allowed BlocPower to achieve what it has.</li><li>How BlocPower measures the impact of its technology.</li><li>What Ankur believes the future holds for BlocPower. </li></ul><p><br></p><p><strong>Quotes:</strong></p><p>“Climate change is one of the primary problems of our generation, and BlocPower is making a huge dent in solving that.” — Ankur Garg</p><p><br></p><p>“Machine learning really excels at ingesting huge volumes of data and to be able to infer key relationships between these data points to come up with an optimal output or a solution.” — Ankur Garg</p><p><br></p><p>“Labeling the data and annotating is extremely critical. If your training data set is not of a good quality, no matter what algorithm you use, it won’t really perform well.” — Ankur Garg</p><p><br></p><p>“You need a lot of high-quality data for machine learning and artificial intelligence to be productive.” — Ankur Garg</p><p><br></p><p><strong>Links:</strong></p><p><a href="https://www.linkedin.com/in/ankurgargyale/">Ankur Garg on LinkedIn</a></p><p><a href="https://www.blocpower.io/">BlocPower</a></p><p><a href="mailto:support@blocpower.io">BlocPower Email Address</a></p><p><strong>Resources for Computer Vision Teams:</strong></p><p><a href="https://www.linkedin.com/in/hdcouture/">LinkedIn</a> – Connect with Heather.</p><p><a href="https://pixelscientia.com/newsletter/">Computer Vision Insights Newsletter</a> – A biweekly newsletter to help bring the latest machine learning and computer vision research to applications in people and planetary health.</p><p><a href="http://pixelscientia.com/services/strategy-session/">Computer Vision Strategy Session</a> – Not sure how to advance your computer vision project? Get unstuck with a clear set of next steps. 
Schedule a 1 hour strategy session now to advance your project.</p><p><a href="https://pixelscientia.com/services/advisory/">Computer Vision Advisory Services</a> – Monthly advisory services to help you strategically plan your CV/ML capabilities, reduce the trial-and-error of model development, and get to market faster.</p>]]>
      </content:encoded>
      <pubDate>Mon, 31 Jul 2023 06:00:00 -0400</pubDate>
      <author>Heather D. Couture</author>
      <enclosure url="https://2.gum.fm/op3.dev/e/pdcn.co/e/pdst.fm/e/dts.podtrac.com/redirect.mp3/media.transistor.fm/bd0ab8bb/55fd2d56.mp3" length="23976023" type="audio/mpeg"/>
      <itunes:author>Heather D. Couture</itunes:author>
      <itunes:image href="https://img.transistor.fm/Z8x1hxrWhkO1mNuXDh9g0zhlwIZjXiOxrb1YYg8dKbo/rs:fill:0:0:1/w:1400/h:1400/q:60/mb:500000/aHR0cHM6Ly9pbWct/dXBsb2FkLXByb2R1/Y3Rpb24udHJhbnNp/c3Rvci5mbS9lcGlz/b2RlLzE0MDY3MDIv/MTY4ODQzNzQ0MS1h/cnR3b3JrLmpwZw.jpg"/>
      <itunes:duration>1490</itunes:duration>
      <itunes:summary>
        <![CDATA[<p>Climate change is one of the most pressing issues of our time, and today’s guest, Ankur Garg, and his team at BlocPower are using machine learning technology to mitigate it. BlocPower is a climate technology company that is focused on making buildings in low and middle-income areas more environmentally friendly. Their area of expertise lies in developing products and services to lower or eliminate the barriers that prevent access to energy efficiency and electrification retrofits. And this all starts with gathering, checking, annotating, and understanding enormous amounts of data (BlocPower currently has over 40 terabytes of data in its data lake!)</p><p>In this episode, Ankur talks about the innovative ways in which BlocPower deals with its data, the challenges that they face when it comes to the size and scope of its datasets, why machine learning technology is central to the work they do, and how they measure the impact of their technology.</p><p><br></p><p><strong>Key Points:</strong></p><ul><li>Ankur’s career journey prior to joining BlocPower. </li><li>Why Ankur decided to join BlocPower.  </li><li>The inspiring work that BlocPower is doing to contribute to solving the problem of climate change.</li><li>The central role that machine learning plays in BlocPower’s approach.</li><li>Ankur gives examples of some of the different types of machine learning models that BlocPower uses.</li><li>The size of BlocPower’s data lake and the types of data stored within it.</li><li>BlocPower’s innovative approach to annotating data. 
</li><li>The importance of high-quality training data sets in the machine learning space.</li><li>Challenges that Ankur and his team face when training machine learning models on their core dataset.</li><li>Technological advancements that have allowed BlocPower to achieve what it has.</li><li>How BlocPower measures the impact of its technology.</li><li>What Ankur believes the future holds for BlocPower. </li></ul><p><br></p><p><strong>Quotes:</strong></p><p>“Climate change is one of the primary problems of our generation, and BlocPower is making a huge dent in solving that.” — Ankur Garg</p><p><br></p><p>“Machine learning really excels at ingesting huge volumes of data and to be able to infer key relationships between these data points to come up with an optimal output or a solution.” — Ankur Garg</p><p><br></p><p>“Labeling the data and annotating is extremely critical. If your training data set is not of a good quality, no matter what algorithm you use, it won’t really perform well.” — Ankur Garg</p><p><br></p><p>“You need a lot of high-quality data for machine learning and artificial intelligence to be productive.” — Ankur Garg</p><p><br></p><p><strong>Links:</strong></p><p><a href="https://www.linkedin.com/in/ankurgargyale/">Ankur Garg on LinkedIn</a></p><p><a href="https://www.blocpower.io/">BlocPower</a></p><p><a href="mailto:support@blocpower.io">BlocPower Email Address</a></p><p><strong>Resources for Computer Vision Teams:</strong></p><p><a href="https://www.linkedin.com/in/hdcouture/">LinkedIn</a> – Connect with Heather.</p><p><a href="https://pixelscientia.com/newsletter/">Computer Vision Insights Newsletter</a> – A biweekly newsletter to help bring the latest machine learning and computer vision research to applications in people and planetary health.</p><p><a href="http://pixelscientia.com/services/strategy-session/">Computer Vision Strategy Session</a> – Not sure how to advance your computer vision project? Get unstuck with a clear set of next steps. 
Schedule a 1 hour strategy session now to advance your project.</p><p><a href="https://pixelscientia.com/services/advisory/">Computer Vision Advisory Services</a> – Monthly advisory services to help you strategically plan your CV/ML capabilities, reduce the trial-and-error of model development, and get to market faster.</p>]]>
      </itunes:summary>
      <itunes:keywords>machine learning, ai, climate action</itunes:keywords>
      <itunes:explicit>No</itunes:explicit>
      <podcast:transcript url="https://share.transistor.fm/s/bd0ab8bb/transcript.txt" type="text/plain"/>
    </item>
    <item>
      <title>Microscopy Image Analysis with Philipp Kainz from KML Vision</title>
      <itunes:episode>42</itunes:episode>
      <podcast:episode>42</podcast:episode>
      <itunes:title>Microscopy Image Analysis with Philipp Kainz from KML Vision</itunes:title>
      <itunes:episodeType>full</itunes:episodeType>
      <guid isPermaLink="false">414aa372-eb86-4333-86dc-e7b0e8961cfc</guid>
      <link>https://pixelscientia.com/podcast/microscopy-image-analysis-with-philipp-kainz-from-kml-vision/</link>
      <description>
        <![CDATA[<p>If you are working in the life science research space and battling with image recognition issues, firstly, you are far from alone, and secondly, there is a solution! That solution comes in the form of KML Vision, an AI-powered start-up co-founded by today’s guest, Philipp Kainz. In this episode, Philipp explains how he became aware of the image analysis problem and the process that he and his team have gone through to develop machine learning models that provide a range of benefits to a diverse cohort of end users. There is still a large gap between what is technologically possible in a research or lab setting and what is actually out there and what people can use. Through their flagship product, IKOSA, Philipp is on a mission to change that. Listen to this episode to gain an understanding of how machine learning is being used to shape the future of life science research! </p><p><br></p><p><strong>Key Points:</strong></p><ul><li>The motivation behind the founding of KML Vision.</li><li>The value that KML Vision’s cloud platform, IKOSA, brings to the life science research space.</li><li>The diversity of end users of IKOSA.</li><li>Benefits that IKOSA provides to its users.</li><li>Examples of some of the most common use cases for IKOSA.</li><li>The role that machine learning plays at KML Vision.</li><li>How KML Vision trains their models.</li><li>The challenges that the KML Vision team have run into when training their models.</li><li>Philipp explains KML Vision’s approach to developing the machine learning aspects of a new product or feature.</li><li>How KML Vision helps to solve the problem of reproducibility in the life science research space.</li><li>Valuable advice for leaders of AI-powered start-ups.</li></ul><p><br></p><p><strong>Quotes:</strong></p><p>“We basically set out to help people overcome this barrier of using new technologies for image analysis.” — Philipp Kainz</p><p><br></p><p>“There is still a big gap between what is 
technologically possible in a research or lab setting and what is actually out there and what people can use. So, we are actually focusing on bridging that gap.” — Philipp Kainz</p><p><br></p><p>“Nobody [really] has time to go into the inner workings of deep learning. They want to use it like we use this smartphone today. This is where we want to be in three to five years.” — Philipp Kainz</p><p><br></p><p><strong>Links:</strong></p><p><a href="https://www.linkedin.com/in/philipp-kainz/?locale=en_US">Philipp Kainz on LinkedIn</a></p><p><a href="https://www.kmlvision.com/">KML Vision</a></p><p><strong>Resources for Computer Vision Teams:</strong></p><p><a href="https://www.linkedin.com/in/hdcouture/">LinkedIn</a> – Connect with Heather.</p><p><a href="https://pixelscientia.com/newsletter/">Computer Vision Insights Newsletter</a> – A biweekly newsletter to help bring the latest machine learning and computer vision research to applications in people and planetary health.</p><p><a href="http://pixelscientia.com/services/strategy-session/">Computer Vision Strategy Session</a> – Not sure how to advance your computer vision project? Get unstuck with a clear set of next steps. Schedule a 1 hour strategy session now to advance your project.</p><p><a href="https://pixelscientia.com/services/advisory/">Computer Vision Advisory Services</a> – Monthly advisory services to help you strategically plan your CV/ML capabilities, reduce the trial-and-error of model development, and get to market faster.</p>]]>
      </description>
      <content:encoded>
        <![CDATA[<p>If you are working in the life science research space and battling with image recognition issues, firstly, you are far from alone, and secondly, there is a solution! That solution comes in the form of KML Vision, an AI-powered start-up co-founded by today’s guest, Philipp Kainz. In this episode, Philipp explains how he became aware of the image analysis problem and the process that he and his team have gone through to develop machine learning models that provide a range of benefits to a diverse cohort of end users. There is still a large gap between what is technologically possible in a research or lab setting and what is actually out there and what people can use. Through their flagship product, IKOSA, Philipp is on a mission to change that. Listen to this episode to gain an understanding of how machine learning is being used to shape the future of life science research! </p><p><br></p><p><strong>Key Points:</strong></p><ul><li>The motivation behind the founding of KML Vision.</li><li>The value that KML Vision’s cloud platform, IKOSA, brings to the life science research space.</li><li>The diversity of end users of IKOSA.</li><li>Benefits that IKOSA provides to its users.</li><li>Examples of some of the most common use cases for IKOSA.</li><li>The role that machine learning plays at KML Vision.</li><li>How KML Vision trains their models.</li><li>The challenges that the KML Vision team have run into when training their models.</li><li>Philipp explains KML Vision’s approach to developing the machine learning aspects of a new product or feature.</li><li>How KML Vision helps to solve the problem of reproducibility in the life science research space.</li><li>Valuable advice for leaders of AI-powered start-ups.</li></ul><p><br></p><p><strong>Quotes:</strong></p><p>“We basically set out to help people overcome this barrier of using new technologies for image analysis.” — Philipp Kainz</p><p><br></p><p>“There is still a big gap between what is 
technologically possible in a research or lab setting and what is actually out there and what people can use. So, we are actually focusing on bridging that gap.” — Philipp Kainz</p><p><br></p><p>“Nobody [really] has time to go into the inner workings of deep learning. They want to use it like we use this smartphone today. This is where we want to be in three to five years.” — Philipp Kainz</p><p><br></p><p><strong>Links:</strong></p><p><a href="https://www.linkedin.com/in/philipp-kainz/?locale=en_US">Philipp Kainz on LinkedIn</a></p><p><a href="https://www.kmlvision.com/">KML Vision</a></p><p><strong>Resources for Computer Vision Teams:</strong></p><p><a href="https://www.linkedin.com/in/hdcouture/">LinkedIn</a> – Connect with Heather.</p><p><a href="https://pixelscientia.com/newsletter/">Computer Vision Insights Newsletter</a> – A biweekly newsletter to help bring the latest machine learning and computer vision research to applications in people and planetary health.</p><p><a href="http://pixelscientia.com/services/strategy-session/">Computer Vision Strategy Session</a> – Not sure how to advance your computer vision project? Get unstuck with a clear set of next steps. Schedule a 1 hour strategy session now to advance your project.</p><p><a href="https://pixelscientia.com/services/advisory/">Computer Vision Advisory Services</a> – Monthly advisory services to help you strategically plan your CV/ML capabilities, reduce the trial-and-error of model development, and get to market faster.</p>]]>
      </content:encoded>
      <pubDate>Mon, 24 Jul 2023 06:00:00 -0400</pubDate>
      <author>Heather D. Couture</author>
      <enclosure url="https://2.gum.fm/op3.dev/e/pdcn.co/e/pdst.fm/e/dts.podtrac.com/redirect.mp3/media.transistor.fm/d748ffb6/2fb4acd9.mp3" length="27863131" type="audio/mpeg"/>
      <itunes:author>Heather D. Couture</itunes:author>
      <itunes:image href="https://img.transistor.fm/duAMawiq64B4SbmphbRBYg1E-W-YIoD6GKxhxif655g/rs:fill:0:0:1/w:1400/h:1400/q:60/mb:500000/aHR0cHM6Ly9pbWct/dXBsb2FkLXByb2R1/Y3Rpb24udHJhbnNp/c3Rvci5mbS9lcGlz/b2RlLzEzODU0MTAv/MTY4Njg1MzUyOC1h/cnR3b3JrLmpwZw.jpg"/>
      <itunes:duration>1152</itunes:duration>
      <itunes:summary>
        <![CDATA[<p>If you are working in the life science research space and battling with image recognition issues, firstly, you are far from alone, and secondly, there is a solution! That solution comes in the form of KML Vision, an AI-powered start-up co-founded by today’s guest, Philipp Kainz. In this episode, Philipp explains how he became aware of the image analysis problem and the process that he and his team have gone through to develop machine learning models that provide a range of benefits to a diverse cohort of end users. There is still a large gap between what is technologically possible in a research or lab setting and what is actually out there and what people can use. Through their flagship product, IKOSA, Philipp is on a mission to change that. Listen to this episode to gain an understanding of how machine learning is being used to shape the future of life science research! </p><p><br></p><p><strong>Key Points:</strong></p><ul><li>The motivation behind the founding of KML Vision.</li><li>The value that KML Vision’s cloud platform, IKOSA, brings to the life science research space.</li><li>The diversity of end users of IKOSA.</li><li>Benefits that IKOSA provides to its users.</li><li>Examples of some of the most common use cases for IKOSA.</li><li>The role that machine learning plays at KML Vision.</li><li>How KML Vision trains their models.</li><li>The challenges that the KML Vision team have run into when training their models.</li><li>Philipp explains KML Vision’s approach to developing the machine learning aspects of a new product or feature.</li><li>How KML Vision helps to solve the problem of reproducibility in the life science research space.</li><li>Valuable advice for leaders of AI-powered start-ups.</li></ul><p><br></p><p><strong>Quotes:</strong></p><p>“We basically set out to help people overcome this barrier of using new technologies for image analysis.” — Philipp Kainz</p><p><br></p><p>“There is still a big gap between what is 
technologically possible in a research or lab setting and what is actually out there and what people can use. So, we are actually focusing on bridging that gap.” — Philipp Kainz</p><p><br></p><p>“Nobody [really] has time to go into the inner workings of deep learning. They want to use it like we use this smartphone today. This is where we want to be in three to five years.” — Philipp Kainz</p><p><br></p><p><strong>Links:</strong></p><p><a href="https://www.linkedin.com/in/philipp-kainz/?locale=en_US">Philipp Kainz on LinkedIn</a></p><p><a href="https://www.kmlvision.com/">KML Vision</a></p><p><strong>Resources for Computer Vision Teams:</strong></p><p><a href="https://www.linkedin.com/in/hdcouture/">LinkedIn</a> – Connect with Heather.</p><p><a href="https://pixelscientia.com/newsletter/">Computer Vision Insights Newsletter</a> – A biweekly newsletter to help bring the latest machine learning and computer vision research to applications in people and planetary health.</p><p><a href="http://pixelscientia.com/services/strategy-session/">Computer Vision Strategy Session</a> – Not sure how to advance your computer vision project? Get unstuck with a clear set of next steps. Schedule a 1 hour strategy session now to advance your project.</p><p><a href="https://pixelscientia.com/services/advisory/">Computer Vision Advisory Services</a> – Monthly advisory services to help you strategically plan your CV/ML capabilities, reduce the trial-and-error of model development, and get to market faster.</p>]]>
      </itunes:summary>
      <itunes:keywords>machine learning, computer vision, medical imaging, image analysis</itunes:keywords>
      <itunes:explicit>No</itunes:explicit>
      <podcast:transcript url="https://share.transistor.fm/s/d748ffb6/transcript.txt" type="text/plain"/>
    </item>
    <item>
      <title>Accelerating Materials Development with Greg Mulholland from Citrine Informatics</title>
      <itunes:episode>41</itunes:episode>
      <podcast:episode>41</podcast:episode>
      <itunes:title>Accelerating Materials Development with Greg Mulholland from Citrine Informatics</itunes:title>
      <itunes:episodeType>full</itunes:episodeType>
      <guid isPermaLink="false">967a9ae5-99f8-4921-8054-537f8d9e4c01</guid>
      <link>https://pixelscientia.com/podcast/accelerating-materials-development-with-greg-mulholland-from-citrine-informatics/</link>
      <description>
        <![CDATA[<p>Sustainability is finally getting the attention it deserves as the global drive to reduce our carbon emissions gets more frantic each day. Thankfully, the progression of AI has accelerated the way materials and chemical manufacturers can go about their business in an environmentally friendly and sustainable manner.</p><p>Today I am joined by Greg Mulholland, the Co-Founder and CEO of Citrine Informatics, a technology company that is focused on accelerating the development of the next generation of materials and chemicals. We discuss the role of machine learning in Citrine’s technology, the challenges they are forced to overcome regarding their data sets, the model accuracy and explainability balance, and how Greg and his team validate their models. There is no doubt that Citrine’s work is vital for the global sustainability effort, and our guest explains his company’s collaborative programs, how publishing research articles has boosted Citrine’s profile, what this AI-powered business hopes to achieve in the next five years, and so much more! </p><p><br></p><p><strong>Key Points:</strong></p><ul><li>Introducing Greg Mulholland, his professional background, and how he ended up at Citrine. </li><li>Greg explains what Citrine does and why this work is important for sustainability. </li><li>The role of machine learning in Citrine’s technology. </li><li>Taking a closer look at Citrine’s data sets and the data challenges that they encounter.</li><li>The techniques that Greg and his team use to successfully handle small data sets. </li><li>Examining the balance between model accuracy and explainability. </li><li>How he validates his models. </li><li>An explanation of Citrine’s collaborative program with external researchers. </li><li>The benefits of publishing research articles. </li><li>Greg’s advice to other leaders of AI-powered startups. </li><li>His vision for Citrine’s impact and influence over the next five years. 
</li></ul><p><br></p><p><strong>Quotes:</strong></p><p>“I trained as an electrical engineer and got into material science because I believed that material science was really an important technology set of disciplines that we needed, to solve the world's most pressing environmental challenges.” — Greg Mulholland</p><p>“We started the company 10 years ago now; we've been able to show that machine learning and artificial intelligence, among other things, can be used to really accelerate the future of the materials and chemicals industry. It was the vision all along, but it really required a lot of technology development and we're really proud of how far we've come.” — Greg Mulholland</p><p><br></p><p>“The scientists in our community are brilliant people.” — Greg Mulholland</p><p><br></p><p>“Explainability is important. Accuracy is also important. Neither is dominant over the other. It turns out, a less accurate model that is more explainable can often help unlock new thinking in a scientist's mind, that then unlocks the next-generation product.” — Greg Mulholland</p><p><br></p><p>“Publishing what we do as a starter for more conversations; I think it helps us attract good talent. It helps people understand that we're doing cutting-edge research and continue to invest in driving forward the field. I take it as a little bit of a feather in our cap and a source of pride that we get to help the world move along into this new era of AI.” — Greg Mulholland</p><p><br></p><p>“We've seen companies remove toxic chemicals from important products much more quickly than they could have otherwise. We've seen companies reduce their energy consumption. We've seen companies reduce costs and reduce carbon input. 
Those are all really exciting to me.” — Greg Mulholland</p><p><br></p><p><strong>Links:</strong></p><p><a href="https://www.linkedin.com/in/gregorymulholland/">Greg Mulholland on LinkedIn</a></p><p><a href="https://twitter.com/gregmulholland">Greg Mulholland on Twitter</a></p><p><a href="https://citrine.io/">Citrine Informatics</a></p><p><strong>Resources for Computer Vision Teams:</strong></p><p><a href="https://www.linkedin.com/in/hdcouture/">LinkedIn</a> – Connect with Heather.</p><p><a href="https://pixelscientia.com/newsletter/">Computer Vision Insights Newsletter</a> – A biweekly newsletter to help bring the latest machine learning and computer vision research to applications in people and planetary health.</p><p><a href="http://pixelscientia.com/services/strategy-session/">Computer Vision Strategy Session</a> – Not sure how to advance your computer vision project? Get unstuck with a clear set of next steps. Schedule a 1 hour strategy session now to advance your project.</p><p><a href="https://pixelscientia.com/services/advisory/">Computer Vision Advisory Services</a> – Monthly advisory services to help you strategically plan your CV/ML capabilities, reduce the trial-and-error of model development, and get to market faster.</p>]]>
      </description>
      <content:encoded>
        <![CDATA[<p>Sustainability is finally getting the attention it deserves as the global drive to reduce our carbon emissions gets more frantic each day. Thankfully, the progression of AI has accelerated the way materials and chemical manufacturers can go about their business in an environmentally friendly and sustainable manner.</p><p>Today I am joined by Greg Mulholland, the Co-Founder and CEO of Citrine Informatics, a technology company that is focused on accelerating the development of the next generation of materials and chemicals. We discuss the role of machine learning in Citrine’s technology, the challenges they are forced to overcome regarding their data sets, the model accuracy and explainability balance, and how Greg and his team validate their models. There is no doubt that Citrine’s work is vital for the global sustainability effort, and our guest explains his company’s collaborative programs, how publishing research articles has boosted Citrine’s profile, what this AI-powered business hopes to achieve in the next five years, and so much more! </p><p><br></p><p><strong>Key Points:</strong></p><ul><li>Introducing Greg Mulholland, his professional background, and how he ended up at Citrine. </li><li>Greg explains what Citrine does and why this work is important for sustainability. </li><li>The role of machine learning in Citrine’s technology. </li><li>Taking a closer look at Citrine’s data sets and the data challenges that they encounter.</li><li>The techniques that Greg and his team use to successfully handle small data sets. </li><li>Examining the balance between model accuracy and explainability. </li><li>How he validates his models. </li><li>An explanation of Citrine’s collaborative program with external researchers. </li><li>The benefits of publishing research articles. </li><li>Greg’s advice to other leaders of AI-powered startups. </li><li>His vision for Citrine’s impact and influence over the next five years. 
</li></ul><p><br></p><p><strong>Quotes:</strong></p><p>“I trained as an electrical engineer and got into material science because I believed that material science was really an important technology set of disciplines that we needed, to solve the world's most pressing environmental challenges.” — Greg Mulholland</p><p>“We started the company 10 years ago now; we've been able to show that machine learning and artificial intelligence, among other things, can be used to really accelerate the future of the materials and chemicals industry. It was the vision all along, but it really required a lot of technology development and we're really proud of how far we've come.” — Greg Mulholland</p><p><br></p><p>“The scientists in our community are brilliant people.” — Greg Mulholland</p><p><br></p><p>“Explainability is important. Accuracy is also important. Neither is dominant over the other. It turns out, a less accurate model that is more explainable can often help unlock new thinking in a scientist's mind, that then unlocks the next-generation product.” — Greg Mulholland</p><p><br></p><p>“Publishing what we do as a starter for more conversations; I think it helps us attract good talent. It helps people understand that we're doing cutting-edge research and continue to invest in driving forward the field. I take it as a little bit of a feather in our cap and a source of pride that we get to help the world move along into this new era of AI.” — Greg Mulholland</p><p><br></p><p>“We've seen companies remove toxic chemicals from important products much more quickly than they could have otherwise. We've seen companies reduce their energy consumption. We've seen companies reduce costs and reduce carbon input. 
Those are all really exciting to me.” — Greg Mulholland</p><p><br></p><p><strong>Links:</strong></p><p><a href="https://www.linkedin.com/in/gregorymulholland/">Greg Mulholland on LinkedIn</a></p><p><a href="https://twitter.com/gregmulholland">Greg Mulholland on Twitter</a></p><p><a href="https://citrine.io/">Citrine Informatics</a></p><p><strong>Resources for Computer Vision Teams:</strong></p><p><a href="https://www.linkedin.com/in/hdcouture/">LinkedIn</a> – Connect with Heather.</p><p><a href="https://pixelscientia.com/newsletter/">Computer Vision Insights Newsletter</a> – A biweekly newsletter to help bring the latest machine learning and computer vision research to applications in people and planetary health.</p><p><a href="http://pixelscientia.com/services/strategy-session/">Computer Vision Strategy Session</a> – Not sure how to advance your computer vision project? Get unstuck with a clear set of next steps. Schedule a 1 hour strategy session now to advance your project.</p><p><a href="https://pixelscientia.com/services/advisory/">Computer Vision Advisory Services</a> – Monthly advisory services to help you strategically plan your CV/ML capabilities, reduce the trial-and-error of model development, and get to market faster.</p>]]>
      </content:encoded>
      <pubDate>Mon, 17 Jul 2023 06:00:00 -0400</pubDate>
      <author>Heather D. Couture</author>
      <enclosure url="https://2.gum.fm/op3.dev/e/pdcn.co/e/pdst.fm/e/dts.podtrac.com/redirect.mp3/media.transistor.fm/f607d5b3/adeb0f5a.mp3" length="24980004" type="audio/mpeg"/>
      <itunes:author>Heather D. Couture</itunes:author>
      <itunes:image href="https://img.transistor.fm/4euob_19vAiCKfycecD5tWBcuRBfsnaOW9mGcwPZIcY/rs:fill:0:0:1/w:1400/h:1400/q:60/mb:500000/aHR0cHM6Ly9pbWct/dXBsb2FkLXByb2R1/Y3Rpb24udHJhbnNp/c3Rvci5mbS9lcGlz/b2RlLzE0MDY3MDEv/MTY4ODQzNzI3Mi1h/cnR3b3JrLmpwZw.jpg"/>
      <itunes:duration>1555</itunes:duration>
      <itunes:summary>
        <![CDATA[<p>Sustainability is finally getting the attention it deserves as the global drive to reduce our carbon emissions gets more frantic each day. Thankfully, the progression of AI has accelerated the way materials and chemical manufacturers can go about their business in an environmentally friendly and sustainable manner.</p><p>Today I am joined by Greg Mulholland, the Co-Founder and CEO of Citrine Informatics, a technology company that is focused on accelerating the development of the next generation of materials and chemicals. We discuss the role of machine learning in Citrine’s technology, the challenges they are forced to overcome regarding their data sets, the model accuracy and explainability balance, and how Greg and his team validate their models. There is no doubt that Citrine’s work is vital for the global sustainability effort, and our guest explains his company’s collaborative programs, how publishing research articles has boosted Citrine’s profile, what this AI-powered business hopes to achieve in the next five years, and so much more! </p><p><br></p><p><strong>Key Points:</strong></p><ul><li>Introducing Greg Mulholland, his professional background, and how he ended up at Citrine. </li><li>Greg explains what Citrine does and why this work is important for sustainability. </li><li>The role of machine learning in Citrine’s technology. </li><li>Taking a closer look at Citrine’s data sets and the data challenges that they encounter.</li><li>The techniques that Greg and his team use to successfully handle small data sets. </li><li>Examining the balance between model accuracy and explainability. </li><li>How he validates his models. </li><li>An explanation of Citrine’s collaborative program with external researchers. </li><li>The benefits of publishing research articles. </li><li>Greg’s advice to other leaders of AI-powered startups. </li><li>His vision for Citrine’s impact and influence over the next five years. 
</li></ul><p><br></p><p><strong>Quotes:</strong></p><p>“I trained as an electrical engineer and got into material science because I believed that material science was really an important technology set of disciplines that we needed, to solve the world's most pressing environmental challenges.” — Greg Mulholland</p><p>“We started the company 10 years ago now; we've been able to show that machine learning and artificial intelligence, among other things, can be used to really accelerate the future of the materials and chemicals industry. It was the vision all along, but it really required a lot of technology development and we're really proud of how far we've come.” — Greg Mulholland</p><p><br></p><p>“The scientists in our community are brilliant people.” — Greg Mulholland</p><p><br></p><p>“Explainability is important. Accuracy is also important. Neither is dominant over the other. It turns out, a less accurate model that is more explainable can often help unlock new thinking in a scientist's mind, that then unlocks the next-generation product.” — Greg Mulholland</p><p><br></p><p>“Publishing what we do as a starter for more conversations; I think it helps us attract good talent. It helps people understand that we're doing cutting-edge research and continue to invest in driving forward the field. I take it as a little bit of a feather in our cap and a source of pride that we get to help the world move along into this new era of AI.” — Greg Mulholland</p><p><br></p><p>“We've seen companies remove toxic chemicals from important products much more quickly than they could have otherwise. We've seen companies reduce their energy consumption. We've seen companies reduce costs and reduce carbon input. 
Those are all really exciting to me.” — Greg Mulholland</p><p><br></p><p><strong>Links:</strong></p><p><a href="https://www.linkedin.com/in/gregorymulholland/">Greg Mulholland on LinkedIn</a></p><p><a href="https://twitter.com/gregmulholland">Greg Mulholland on Twitter</a></p><p><a href="https://citrine.io/">Citrine Informatics</a></p><p><strong>Resources for Computer Vision Teams:</strong></p><p><a href="https://www.linkedin.com/in/hdcouture/">LinkedIn</a> – Connect with Heather.</p><p><a href="https://pixelscientia.com/newsletter/">Computer Vision Insights Newsletter</a> – A biweekly newsletter to help bring the latest machine learning and computer vision research to applications in people and planetary health.</p><p><a href="http://pixelscientia.com/services/strategy-session/">Computer Vision Strategy Session</a> – Not sure how to advance your computer vision project? Get unstuck with a clear set of next steps. Schedule a 1 hour strategy session now to advance your project.</p><p><a href="https://pixelscientia.com/services/advisory/">Computer Vision Advisory Services</a> – Monthly advisory services to help you strategically plan your CV/ML capabilities, reduce the trial-and-error of model development, and get to market faster.</p>]]>
      </itunes:summary>
      <itunes:keywords>machine learning, ai, material informatics</itunes:keywords>
      <itunes:explicit>No</itunes:explicit>
      <podcast:transcript url="https://share.transistor.fm/s/f607d5b3/transcript.txt" type="text/plain"/>
    </item>
    <item>
      <title>Decoding Biology with Aaron Mayer from Enable Medicine</title>
      <itunes:episode>40</itunes:episode>
      <podcast:episode>40</podcast:episode>
      <itunes:title>Decoding Biology with Aaron Mayer from Enable Medicine</itunes:title>
      <itunes:episodeType>full</itunes:episodeType>
      <guid isPermaLink="false">f83d2251-8788-408f-86d3-aa39d260934f</guid>
      <link>https://pixelscientia.com/podcast/decoding-biology-with-aaron-mayer-from-enable-medicine/</link>
      <description>
        <![CDATA[<p>Spatial biology is an important part of the research being done to gain biological insights, and joining me today on Impact AI to discuss how his company, Enable Medicine, uses AI to decode biology is Aaron Mayer. You’ll hear about Aaron's background, what led him to create his company, what Enable Medicine does and why, and how they use machine learning in their endeavors. Aaron shares the struggles they face, why they publish their research, the timing their company has nailed, and so much more! Finally, he shares some words of wisdom for other leaders of AI-powered startups.</p><p><br></p><p><strong>Key Points:</strong></p><ul><li>Aaron’s background and what led him to create Enable Medicine. </li><li>What Enable Medicine does and why it’s important for healthcare. </li><li>The role machine learning plays and how it’s used with spatial biology data. </li><li>The challenges Aaron has faced working with this data.</li><li>How they prepared for the use of AI by creating a data infrastructure from scratch. </li><li>The importance of trust and transparency and the benefits of publishing articles. </li><li>Why this is the perfect time to build this kind of company.</li><li>Aaron shares some advice for other leaders of AI-powered startups. </li><li>Where Aaron sees the impact of Enable Medicine in the near future. 
</li></ul><p><br></p><p><strong>Quotes:</strong></p><p>“The goal of Enable Medicine is really to organize biological data and make it searchable to deliver insights to the questions that we really care about.” — Aaron Mayer</p><p><br></p><p>“Machine learning and AI is deeply integrated into the platform and technology stack that we've been building [at Enable Medicine].” — Aaron Mayer</p><p><br></p><p>“We want to take these various AI models and put them into an environment where they can operate with an expert in a loop.” — Aaron Mayer</p><p><br></p><p><strong>Links:</strong></p><p><a href="https://www.linkedin.com/in/aaron-mayer-88237145/">Aaron Mayer on LinkedIn</a></p><p><a href="https://www.enablemedicine.com/">Enable Medicine</a></p><p><strong>Resources for Computer Vision Teams:</strong></p><p><a href="https://www.linkedin.com/in/hdcouture/">LinkedIn</a> – Connect with Heather.</p><p><a href="https://pixelscientia.com/newsletter/">Computer Vision Insights Newsletter</a> – A biweekly newsletter to help bring the latest machine learning and computer vision research to applications in people and planetary health.</p><p><a href="http://pixelscientia.com/services/strategy-session/">Computer Vision Strategy Session</a> – Not sure how to advance your computer vision project? Get unstuck with a clear set of next steps. Schedule a 1 hour strategy session now to advance your project.</p><p><a href="https://pixelscientia.com/services/advisory/">Computer Vision Advisory Services</a> – Monthly advisory services to help you strategically plan your CV/ML capabilities, reduce the trial-and-error of model development, and get to market faster.</p>]]>
      </description>
      <content:encoded>
        <![CDATA[<p>Spatial biology is an important part of the research being done to gain biological insights, and joining me today on Impact AI to discuss how his company, Enable Medicine, uses AI to decode biology is Aaron Mayer. You’ll hear about Aaron's background, what led him to create his company, what Enable Medicine does and why, and how they use machine learning in their endeavors. Aaron shares the struggles they face, why they publish their research, the timing their company has nailed, and so much more! Finally, he shares some words of wisdom for other leaders of AI-powered startups.</p><p><br></p><p><strong>Key Points:</strong></p><ul><li>Aaron’s background and what led him to create Enable Medicine. </li><li>What Enable Medicine does and why it’s important for healthcare. </li><li>The role machine learning plays and how it’s used with spatial biology data. </li><li>The challenges Aaron has faced working with this data.</li><li>How they prepared for the use of AI by creating a data infrastructure from scratch. </li><li>The importance of trust and transparency and the benefits of publishing articles. </li><li>Why this is the perfect time to build this kind of company.</li><li>Aaron shares some advice for other leaders of AI-powered startups. </li><li>Where Aaron sees the impact of Enable Medicine in the near future. 
</li></ul><p><br></p><p><strong>Quotes:</strong></p><p>“The goal of Enable Medicine is really to organize biological data and make it searchable to deliver insights to the questions that we really care about.” — Aaron Mayer</p><p><br></p><p>“Machine learning and AI is deeply integrated into the platform and technology stack that we've been building [at Enable Medicine].” — Aaron Mayer</p><p><br></p><p>“We want to take these various AI models and put them into an environment where they can operate with an expert in a loop.” — Aaron Mayer</p><p><br></p><p><strong>Links:</strong></p><p><a href="https://www.linkedin.com/in/aaron-mayer-88237145/">Aaron Mayer on LinkedIn</a></p><p><a href="https://www.enablemedicine.com/">Enable Medicine</a></p><p><strong>Resources for Computer Vision Teams:</strong></p><p><a href="https://www.linkedin.com/in/hdcouture/">LinkedIn</a> – Connect with Heather.</p><p><a href="https://pixelscientia.com/newsletter/">Computer Vision Insights Newsletter</a> – A biweekly newsletter to help bring the latest machine learning and computer vision research to applications in people and planetary health.</p><p><a href="http://pixelscientia.com/services/strategy-session/">Computer Vision Strategy Session</a> – Not sure how to advance your computer vision project? Get unstuck with a clear set of next steps. Schedule a 1 hour strategy session now to advance your project.</p><p><a href="https://pixelscientia.com/services/advisory/">Computer Vision Advisory Services</a> – Monthly advisory services to help you strategically plan your CV/ML capabilities, reduce the trial-and-error of model development, and get to market faster.</p>]]>
      </content:encoded>
      <pubDate>Mon, 10 Jul 2023 06:00:00 -0400</pubDate>
      <author>Heather D. Couture</author>
      <enclosure url="https://2.gum.fm/op3.dev/e/pdcn.co/e/pdst.fm/e/dts.podtrac.com/redirect.mp3/media.transistor.fm/8e4e0982/9109c597.mp3" length="37711621" type="audio/mpeg"/>
      <itunes:author>Heather D. Couture</itunes:author>
      <itunes:image href="https://img.transistor.fm/ZGGgw10B8VMfWYx0vKQWixVepCqdfGBQCI2QqlgLoWM/rs:fill:0:0:1/w:1400/h:1400/q:60/mb:500000/aHR0cHM6Ly9pbWct/dXBsb2FkLXByb2R1/Y3Rpb24udHJhbnNp/c3Rvci5mbS9lcGlz/b2RlLzEzODU0MDkv/MTY4Njg1MzQwNS1h/cnR3b3JrLmpwZw.jpg"/>
      <itunes:duration>1565</itunes:duration>
      <itunes:summary>
        <![CDATA[<p>Spatial biology is an important part of the research being done to gain biological insights, and joining me today on Impact AI to discuss how his company, Enable Medicine, uses AI to decode biology is Aaron Mayer. You’ll hear about Aaron's background, what led him to create his company, what Enable Medicine does and why, and how they use machine learning in their endeavors. Aaron shares the struggles they face, why they publish their research, the timing their company has nailed, and so much more! Finally, he shares some words of wisdom for other leaders of AI-powered startups.</p><p><br></p><p><strong>Key Points:</strong></p><ul><li>Aaron’s background and what led him to create Enable Medicine. </li><li>What Enable Medicine does and why it’s important for healthcare. </li><li>The role machine learning plays and how it’s used with spatial biology data. </li><li>The challenges Aaron has faced working with this data.</li><li>How they prepared for the use of AI by creating a data infrastructure from scratch. </li><li>The importance of trust and transparency and the benefits of publishing articles. </li><li>Why this is the perfect time to build this kind of company.</li><li>Aaron shares some advice for other leaders of AI-powered startups. </li><li>Where Aaron sees the impact of Enable Medicine in the near future. 
</li></ul><p><br></p><p><strong>Quotes:</strong></p><p>“The goal of Enable Medicine is really to organize biological data and make it searchable to deliver insights to the questions that we really care about.” — Aaron Mayer</p><p><br></p><p>“Machine learning and AI is deeply integrated into the platform and technology stack that we've been building [at Enable Medicine].” — Aaron Mayer</p><p><br></p><p>“We want to take these various AI models and put them into an environment where they can operate with an expert in a loop.” — Aaron Mayer</p><p><br></p><p><strong>Links:</strong></p><p><a href="https://www.linkedin.com/in/aaron-mayer-88237145/">Aaron Mayer on LinkedIn</a></p><p><a href="https://www.enablemedicine.com/">Enable Medicine</a></p><p><strong>Resources for Computer Vision Teams:</strong></p><p><a href="https://www.linkedin.com/in/hdcouture/">LinkedIn</a> – Connect with Heather.</p><p><a href="https://pixelscientia.com/newsletter/">Computer Vision Insights Newsletter</a> – A biweekly newsletter to help bring the latest machine learning and computer vision research to applications in people and planetary health.</p><p><a href="http://pixelscientia.com/services/strategy-session/">Computer Vision Strategy Session</a> – Not sure how to advance your computer vision project? Get unstuck with a clear set of next steps. Schedule a 1 hour strategy session now to advance your project.</p><p><a href="https://pixelscientia.com/services/advisory/">Computer Vision Advisory Services</a> – Monthly advisory services to help you strategically plan your CV/ML capabilities, reduce the trial-and-error of model development, and get to market faster.</p>]]>
      </itunes:summary>
      <itunes:keywords>machine learning, computer vision, spatial biology</itunes:keywords>
      <itunes:explicit>No</itunes:explicit>
      <podcast:transcript url="https://share.transistor.fm/s/8e4e0982/transcript.txt" type="text/plain"/>
    </item>
    <item>
      <title>Empowering the Visually Impaired with Karthik Kannan from Envision</title>
      <itunes:episode>39</itunes:episode>
      <podcast:episode>39</podcast:episode>
      <itunes:title>Empowering the Visually Impaired with Karthik Kannan from Envision</itunes:title>
      <itunes:episodeType>full</itunes:episodeType>
      <guid isPermaLink="false">e07a8ccb-8999-4dc9-b1e4-ed4dd4a57922</guid>
      <link>https://pixelscientia.com/podcast/empowering-the-visually-impaired-with-karthik-kannan-from-envision/</link>
      <description>
        <![CDATA[<p>Today’s AI-powered company of focus, Envision, has made it its mission to improve the lives of the visually impaired so that they can live more independently. I am joined by Envision’s Co-Founder and CTO, Karthik Kannan, to discuss how he and his team gather data for their unique models, how they develop new products and features, and how they are able to ensure that their technology performs well with multiple users and across various environments. Technological advances and the recent boom in AI mean that now is the perfect time for Envision to thrive, and Karthik explains exactly how he and his team are taking advantage of this unique moment. We end this informative discussion with Karthik’s advice for other leaders of AI-powered startups, and what he hopes Envision will achieve in the next five years. </p><p><br></p><p><strong>Key Points:</strong></p><ul><li>Introducing Karthik Kannan as he explains what led him to found Envision. </li><li>What Envision does and how its technology improves the lives of visually impaired people.</li><li>How Karthik and his team gather data for training their models. </li><li>His process of planning and developing a new machine learning feature. </li><li>Diving deeper into the development of a new product or feature. </li><li>How Karthik ensures that his technology performs across multiple users and environments. </li><li>Why now is the perfect time for Envision’s technology to thrive. </li><li>How Karthik measures the impact of his company’s technology. </li><li>His advice to other AI-powered startup leaders, and his hopes for the future of Envision.  </li></ul><p><br></p><p><strong>Quotes:</strong></p><p>“I was fascinated with games. I had asked my dad how people make games and he said, ‘They write code.’ Then he put me onto a programming class and that's how I got started with writing code.” — Karthik Kannan</p><p><br></p><p>“I didn't go on to my master's or anything. 
I just did my bachelor's, just started working directly, because I was more eager to get my hands dirty into making software and stuff.” — Karthik Kannan</p><p><br></p><p>“[Envision’s] overarching theme is to constantly look at how we can translate the advances in computer vision, and broadly AI, into tools that can help a visually impaired person live a more independent life.” — Karthik Kannan</p><p><br></p><p>“We mix both data from open datasets plus we throw in a healthy mix of data that's captured from a visually impaired person's perspective — that's what makes the whole data collection and cleaning process quite unique at Envision.” — Karthik Kannan</p><p><br></p><p>“That whole process of [user] validation is extremely, extremely important because we're not the direct users of the product ourselves.” — Karthik Kannan</p><p><br></p><p>“In the AI space right now, the most important thing is to try and understand where, or have a very clear idea as to what kind of impact AI is making on your customers, and to double down on that.” — Karthik Kannan</p><p><br></p><p><strong>Links:</strong></p><p><a href="https://www.linkedin.com/in/karthikk-314">Karthik Kannan on LinkedIn</a></p><p><a href="https://twitter.com/meTheKarthik">Karthik Kannan on Twitter</a></p><p><a href="https://www.letsenvision.com/">Envision</a> </p><p><a href="https://twitter.com/LetsEnvision">Envision on Twitter</a></p><p><a href="https://www.youtube.com/@EnvisionAI">Envision on YouTube</a></p><p><a href="https://www.youtube.com/watch?v=p7j5PFeupuM">‘Envisioners Day- Hear from our users!’</a></p><p><strong>Resources for Computer Vision Teams:</strong></p><p><a href="https://www.linkedin.com/in/hdcouture/">LinkedIn</a> – Connect with Heather.</p><p><a href="https://pixelscientia.com/newsletter/">Computer Vision Insights Newsletter</a> – A biweekly newsletter to help bring the latest machine learning and computer vision research to applications in people and planetary health.</p><p><a 
href="http://pixelscientia.com/services/strategy-session/">Computer Vision Strategy Session</a> – Not sure how to advance your computer vision project? Get unstuck with a clear set of next steps. Schedule a 1 hour strategy session now to advance your project.</p><p><a href="https://pixelscientia.com/services/advisory/">Computer Vision Advisory Services</a> – Monthly advisory services to help you strategically plan your CV/ML capabilities, reduce the trial-and-error of model development, and get to market faster.</p>]]>
      </description>
      <content:encoded>
        <![CDATA[<p>Today’s AI-powered company of focus, Envision, has made it its mission to improve the lives of the visually impaired so that they can live more independently. I am joined by Envision’s Co-Founder and CTO, Karthik Kannan, to discuss how he and his team gather data for their unique models, how they develop new products and features, and how they are able to ensure that their technology performs well with multiple users and across various environments. Technological advances and the recent boom in AI mean that now is the perfect time for Envision to thrive, and Karthik explains exactly how he and his team are taking advantage of this unique moment. We end this informative discussion with Karthik’s advice for other leaders of AI-powered startups, and what he hopes Envision will achieve in the next five years. </p><p><br></p><p><strong>Key Points:</strong></p><ul><li>Introducing Karthik Kannan as he explains what led him to found Envision. </li><li>What Envision does and how its technology improves the lives of visually impaired people.</li><li>How Karthik and his team gather data for training their models. </li><li>His process of planning and developing a new machine learning feature. </li><li>Diving deeper into the development of a new product or feature. </li><li>How Karthik ensures that his technology performs across multiple users and environments. </li><li>Why now is the perfect time for Envision’s technology to thrive. </li><li>How Karthik measures the impact of his company’s technology. </li><li>His advice to other AI-powered startup leaders, and his hopes for the future of Envision.  </li></ul><p><br></p><p><strong>Quotes:</strong></p><p>“I was fascinated with games. I had asked my dad how people make games and he said, ‘They write code.’ Then he put me onto a programming class and that's how I got started with writing code.” — Karthik Kannan</p><p><br></p><p>“I didn't go on to my master's or anything. 
I just did my bachelor's, just started working directly, because I was more eager to get my hands dirty into making software and stuff.” — Karthik Kannan</p><p><br></p><p>“[Envision’s] overarching theme is to constantly look at how we can translate the advances in computer vision, and broadly AI, into tools that can help a visually impaired person live a more independent life.” — Karthik Kannan</p><p><br></p><p>“We mix both data from open datasets plus we throw in a healthy mix of data that's captured from a visually impaired person's perspective — that's what makes the whole data collection and cleaning process quite unique at Envision.” — Karthik Kannan</p><p><br></p><p>“That whole process of [user] validation is extremely, extremely important because we're not the direct users of the product ourselves.” — Karthik Kannan</p><p><br></p><p>“In the AI space right now, the most important thing is to try and understand where, or have a very clear idea as to what kind of impact AI is making on your customers, and to double down on that.” — Karthik Kannan</p><p><br></p><p><strong>Links:</strong></p><p><a href="https://www.linkedin.com/in/karthikk-314">Karthik Kannan on LinkedIn</a></p><p><a href="https://twitter.com/meTheKarthik">Karthik Kannan on Twitter</a></p><p><a href="https://www.letsenvision.com/">Envision</a> </p><p><a href="https://twitter.com/LetsEnvision">Envision on Twitter</a></p><p><a href="https://www.youtube.com/@EnvisionAI">Envision on YouTube</a></p><p><a href="https://www.youtube.com/watch?v=p7j5PFeupuM">‘Envisioners Day- Hear from our users!’</a></p><p><strong>Resources for Computer Vision Teams:</strong></p><p><a href="https://www.linkedin.com/in/hdcouture/">LinkedIn</a> – Connect with Heather.</p><p><a href="https://pixelscientia.com/newsletter/">Computer Vision Insights Newsletter</a> – A biweekly newsletter to help bring the latest machine learning and computer vision research to applications in people and planetary health.</p><p><a 
href="http://pixelscientia.com/services/strategy-session/">Computer Vision Strategy Session</a> – Not sure how to advance your computer vision project? Get unstuck with a clear set of next steps. Schedule a 1 hour strategy session now to advance your project.</p><p><a href="https://pixelscientia.com/services/advisory/">Computer Vision Advisory Services</a> – Monthly advisory services to help you strategically plan your CV/ML capabilities, reduce the trial-and-error of model development, and get to market faster.</p>]]>
      </content:encoded>
      <pubDate>Mon, 03 Jul 2023 06:00:00 -0400</pubDate>
      <author>Heather D. Couture</author>
      <enclosure url="https://2.gum.fm/op3.dev/e/pdcn.co/e/pdst.fm/e/dts.podtrac.com/redirect.mp3/media.transistor.fm/801dc469/6313e2cd.mp3" length="44367641" type="audio/mpeg"/>
      <itunes:author>Heather D. Couture</itunes:author>
      <itunes:image href="https://img.transistor.fm/Q_LzDQnez0Y1zqKqmSjOim5RwGnhWUDXqtXafQ1p5AU/rs:fill:0:0:1/w:1400/h:1400/q:60/mb:500000/aHR0cHM6Ly9pbWct/dXBsb2FkLXByb2R1/Y3Rpb24udHJhbnNp/c3Rvci5mbS9lcGlz/b2RlLzEzODU0MDgv/MTY4Njg1MzI2OS1h/cnR3b3JrLmpwZw.jpg"/>
      <itunes:duration>1844</itunes:duration>
      <itunes:summary>
        <![CDATA[<p>Today’s AI-powered company of focus, Envision, has made it its mission to improve the lives of the visually impaired so that they can live more independently. I am joined by Envision’s Co-Founder and CTO, Karthik Kannan, to discuss how he and his team gather data for their unique models, how they develop new products and features, and how they are able to ensure that their technology performs well with multiple users and across various environments. Technological advances and the recent boom in AI mean that now is the perfect time for Envision to thrive, and Karthik explains exactly how he and his team are taking advantage of this unique moment. We end this informative discussion with Karthik’s advice for other leaders of AI-powered startups, and what he hopes Envision will achieve in the next five years. </p><p><br></p><p><strong>Key Points:</strong></p><ul><li>Introducing Karthik Kannan as he explains what led him to found Envision. </li><li>What Envision does and how its technology improves the lives of visually impaired people.</li><li>How Karthik and his team gather data for training their models. </li><li>His process of planning and developing a new machine learning feature. </li><li>Diving deeper into the development of a new product or feature. </li><li>How Karthik ensures that his technology performs across multiple users and environments. </li><li>Why now is the perfect time for Envision’s technology to thrive. </li><li>How Karthik measures the impact of his company’s technology. </li><li>His advice to other AI-powered startup leaders, and his hopes for the future of Envision.  </li></ul><p><br></p><p><strong>Quotes:</strong></p><p>“I was fascinated with games. I had asked my dad how people make games and he said, ‘They write code.’ Then he put me onto a programming class and that's how I got started with writing code.” — Karthik Kannan</p><p><br></p><p>“I didn't go on to my master's or anything. 
I just did my bachelor's, just started working directly, because I was more eager to get my hands dirty into making software and stuff.” — Karthik Kannan</p><p><br></p><p>“[Envision’s] overarching theme is to constantly look at how we can translate the advances in computer vision, and broadly AI, into tools that can help a visually impaired person live a more independent life.” — Karthik Kannan</p><p><br></p><p>“We mix both data from open datasets plus we throw in a healthy mix of data that's captured from a visually impaired person's perspective — that's what makes the whole data collection and cleaning process quite unique at Envision.” — Karthik Kannan</p><p><br></p><p>“That whole process of [user] validation is extremely, extremely important because we're not the direct users of the product ourselves.” — Karthik Kannan</p><p><br></p><p>“In the AI space right now, the most important thing is to try and understand where, or have a very clear idea as to what kind of impact AI is making on your customers, and to double down on that.” — Karthik Kannan</p><p><br></p><p><strong>Links:</strong></p><p><a href="https://www.linkedin.com/in/karthikk-314">Karthik Kannan on LinkedIn</a></p><p><a href="https://twitter.com/meTheKarthik">Karthik Kannan on Twitter</a></p><p><a href="https://www.letsenvision.com/">Envision</a> </p><p><a href="https://twitter.com/LetsEnvision">Envision on Twitter</a></p><p><a href="https://www.youtube.com/@EnvisionAI">Envision on YouTube</a></p><p><a href="https://www.youtube.com/watch?v=p7j5PFeupuM">‘Envisioners Day- Hear from our users!’</a></p><p><strong>Resources for Computer Vision Teams:</strong></p><p><a href="https://www.linkedin.com/in/hdcouture/">LinkedIn</a> – Connect with Heather.</p><p><a href="https://pixelscientia.com/newsletter/">Computer Vision Insights Newsletter</a> – A biweekly newsletter to help bring the latest machine learning and computer vision research to applications in people and planetary health.</p><p><a 
href="http://pixelscientia.com/services/strategy-session/">Computer Vision Strategy Session</a> – Not sure how to advance your computer vision project? Get unstuck with a clear set of next steps. Schedule a 1 hour strategy session now to advance your project.</p><p><a href="https://pixelscientia.com/services/advisory/">Computer Vision Advisory Services</a> – Monthly advisory services to help you strategically plan your CV/ML capabilities, reduce the trial-and-error of model development, and get to market faster.</p>]]>
      </itunes:summary>
      <itunes:keywords>machine learning, computer vision, smart glasses, envision glasses</itunes:keywords>
      <itunes:explicit>No</itunes:explicit>
      <podcast:transcript url="https://share.transistor.fm/s/801dc469/transcript.txt" type="text/plain"/>
    </item>
    <item>
      <title>Conquering Cancer with Sergio Pereira from Lunit</title>
      <itunes:episode>38</itunes:episode>
      <podcast:episode>38</podcast:episode>
      <itunes:title>Conquering Cancer with Sergio Pereira from Lunit</itunes:title>
      <itunes:episodeType>full</itunes:episodeType>
      <guid isPermaLink="false">b7efad90-137e-41fc-87d6-b24fe6dccbd4</guid>
      <link>https://pixelscientia.com/podcast/conquering-cancer-with-sergio-pereira-from-lunit/</link>
      <description>
        <![CDATA[<p>AI seems to be taking the world by storm, and it is easy to use this new technology for either good or bad. Today I am joined by Sérgio Pereira, the VP of AI Research, Oncology Group, at Lunit, a company using AI for good by conquering cancer with machine learning.</p><p>You’ll hear about Sérgio’s professional background, Lunit’s missions, how they use AI for cancer screening and treatment planning, and so much more. Sérgio delves into how they read imaging before discussing the differences between supervised, self-supervised, and contrastive learning. Lunit has created an incredible dataset called Ocelot, and he tells us all about its benefits, how they published it, and why publishing a paper while ensuring that quality products are being produced is a challenge. Finally, Sérgio tells us his hopes for the future of Lunit.</p><p><br></p><p><strong>Key Points:</strong></p><ul><li>A brief overview of Sérgio Pereira's background, and what led him to Lunit. </li><li>Lunit’s mission and why it’s important in fighting cancer. </li><li>How Lunit applies machine learning for cancer screening and treatment planning.</li><li>What most of the Lunit products are based on and how genetics come into play. </li><li>Why subjectivity becomes an issue when it comes to reading images in machine learning. </li><li>Sérgio explains what supervised, self-supervised, and contrastive learning is.</li><li>The lessons from machine learning work that can be applied to other types of imaging. </li><li>He tells us about Lunit’s dataset, Ocelot, and the benefits of it.</li><li>How Lunit publishes their datasets. </li><li>The challenges of getting a paper out while getting a product on the market. </li><li>Sérgio shares some important things AI-powered startups need to consider. </li><li>Where Sérgio sees the impact of Lunit in the near future. 
</li></ul><p><br></p><p><strong>Quotes:</strong></p><p>“Our mission at Lunit is to conquer cancer through AI.” — Sérgio Pereira</p><p><br></p><p>“We don’t have many products at Lunit, that’s a fact, but the ones we have, we believe they are [the] best-in-class.” — Sérgio Pereira</p><p><br></p><p>“Mistakes in healthcare can have a very big impact, so we need to be able to show and demonstrate that our products work as we promised.” — Sérgio Pereira</p><p><br></p><p>“AI can be used for good and for bad. Let’s make sure we work on the good part.” — Sérgio Pereira</p><p><br></p><p><strong>Links:</strong></p><p><a href="https://www.linkedin.com/in/pereirasrm/">Sérgio Pereira on LinkedIn</a></p><p><a href="https://scholar.google.pt/citations">Sérgio Pereira on Google Scholar</a></p><p><a href="https://twitter.com/pereirasrm">Sérgio Pereira on Twitter</a></p><p><a href="https://www.lunit.io/en">Lunit Inc.</a><br>Paper: <a href="https://arxiv.org/abs/2303.13110">OCELOT: Overlapped Cell on Tissue Dataset for Histopathology</a></p><p>Dataset: <a href="https://lunit-io.github.io/research/publications/ocelot/">OCELOT</a></p><p>Paper: <a href="https://arxiv.org/abs/2212.04690">Benchmarking Self-Supervised Learning on Diverse Pathology Datasets</a></p><p><strong>Resources for Computer Vision Teams:</strong></p><p><a href="https://www.linkedin.com/in/hdcouture/">LinkedIn</a> – Connect with Heather.</p><p><a href="https://pixelscientia.com/newsletter/">Computer Vision Insights Newsletter</a> – A biweekly newsletter to help bring the latest machine learning and computer vision research to applications in people and planetary health.</p><p><a href="http://pixelscientia.com/services/strategy-session/">Computer Vision Strategy Session</a> – Not sure how to advance your computer vision project? Get unstuck with a clear set of next steps. 
Schedule a 1 hour strategy session now to advance your project.</p><p><a href="https://pixelscientia.com/services/advisory/">Computer Vision Advisory Services</a> – Monthly advisory services to help you strategically plan your CV/ML capabilities, reduce the trial-and-error of model development, and get to market faster.</p>]]>
      </description>
      <content:encoded>
        <![CDATA[<p>AI seems to be taking the world by storm, and it is easy to use this new technology for either good or bad. Today I am joined by Sérgio Pereira, the VP of AI Research, Oncology Group, at Lunit, a company using AI for good by conquering cancer with machine learning.</p><p>You’ll hear about Sérgio’s professional background, Lunit’s missions, how they use AI for cancer screening and treatment planning, and so much more. Sérgio delves into how they read imaging before discussing the differences between supervised, self-supervised, and contrastive learning. Lunit has created an incredible dataset called Ocelot, and he tells us all about its benefits, how they published it, and why publishing a paper while ensuring that quality products are being produced is a challenge. Finally, Sérgio tells us his hopes for the future of Lunit.</p><p><br></p><p><strong>Key Points:</strong></p><ul><li>A brief overview of Sérgio Pereira's background, and what led him to Lunit. </li><li>Lunit’s mission and why it’s important in fighting cancer. </li><li>How Lunit applies machine learning for cancer screening and treatment planning.</li><li>What most of the Lunit products are based on and how genetics come into play. </li><li>Why subjectivity becomes an issue when it comes to reading images in machine learning. </li><li>Sérgio explains what supervised, self-supervised, and contrastive learning is.</li><li>The lessons from machine learning work that can be applied to other types of imaging. </li><li>He tells us about Lunit’s dataset, Ocelot, and the benefits of it.</li><li>How Lunit publishes their datasets. </li><li>The challenges of getting a paper out while getting a product on the market. </li><li>Sérgio shares some important things AI-powered startups need to consider. </li><li>Where Sérgio sees the impact of Lunit in the near future. 
</li></ul><p><br></p><p><strong>Quotes:</strong></p><p>“Our mission at Lunit is to conquer cancer through AI.” — Sérgio Pereira</p><p><br></p><p>“We don’t have many products at Lunit, that’s a fact, but the ones we have, we believe they are [the] best-in-class.” — Sérgio Pereira</p><p><br></p><p>“Mistakes in healthcare can have a very big impact, so we need to be able to show and demonstrate that our products work as we promised.” — Sérgio Pereira</p><p><br></p><p>“AI can be used for good and for bad. Let’s make sure we work on the good part.” — Sérgio Pereira</p><p><br></p><p><strong>Links:</strong></p><p><a href="https://www.linkedin.com/in/pereirasrm/">Sérgio Pereira on LinkedIn</a></p><p><a href="https://scholar.google.pt/citations">Sérgio Pereira on Google Scholar</a></p><p><a href="https://twitter.com/pereirasrm">Sérgio Pereira on Twitter</a></p><p><a href="https://www.lunit.io/en">Lunit Inc.</a><br>Paper: <a href="https://arxiv.org/abs/2303.13110">OCELOT: Overlapped Cell on Tissue Dataset for Histopathology</a></p><p>Dataset: <a href="https://lunit-io.github.io/research/publications/ocelot/">OCELOT</a></p><p>Paper: <a href="https://arxiv.org/abs/2212.04690">Benchmarking Self-Supervised Learning on Diverse Pathology Datasets</a></p><p><strong>Resources for Computer Vision Teams:</strong></p><p><a href="https://www.linkedin.com/in/hdcouture/">LinkedIn</a> – Connect with Heather.</p><p><a href="https://pixelscientia.com/newsletter/">Computer Vision Insights Newsletter</a> – A biweekly newsletter to help bring the latest machine learning and computer vision research to applications in people and planetary health.</p><p><a href="http://pixelscientia.com/services/strategy-session/">Computer Vision Strategy Session</a> – Not sure how to advance your computer vision project? Get unstuck with a clear set of next steps. 
Schedule a 1 hour strategy session now to advance your project.</p><p><a href="https://pixelscientia.com/services/advisory/">Computer Vision Advisory Services</a> – Monthly advisory services to help you strategically plan your CV/ML capabilities, reduce the trial-and-error of model development, and get to market faster.</p>]]>
      </content:encoded>
      <pubDate>Mon, 26 Jun 2023 06:00:00 -0400</pubDate>
      <author>Heather D. Couture</author>
      <enclosure url="https://2.gum.fm/op3.dev/e/pdcn.co/e/pdst.fm/e/dts.podtrac.com/redirect.mp3/media.transistor.fm/59c26a6b/5125dac0.mp3" length="45902940" type="audio/mpeg"/>
      <itunes:author>Heather D. Couture</itunes:author>
      <itunes:image href="https://img.transistor.fm/4qlNJGb_NxJEJVL4YX2wsb-1leGdoMePvmde-gXtOas/rs:fill:0:0:1/w:1400/h:1400/q:60/mb:500000/aHR0cHM6Ly9pbWct/dXBsb2FkLXByb2R1/Y3Rpb24udHJhbnNp/c3Rvci5mbS9lcGlz/b2RlLzEzMzM2MTIv/MTY4MzkyNDc2MC1h/cnR3b3JrLmpwZw.jpg"/>
      <itunes:duration>1908</itunes:duration>
      <itunes:summary>
        <![CDATA[<p>AI seems to be taking the world by storm, and it is easy to use this new technology for either good or bad. Today I am joined by Sérgio Pereira, the VP of AI Research, Oncology Group, at Lunit, a company using AI for good by conquering cancer with machine learning.</p><p>You’ll hear about Sérgio’s professional background, Lunit’s missions, how they use AI for cancer screening and treatment planning, and so much more. Sérgio delves into how they read imaging before discussing the differences between supervised, self-supervised, and contrastive learning. Lunit has created an incredible dataset called Ocelot, and he tells us all about its benefits, how they published it, and why publishing a paper while ensuring that quality products are being produced is a challenge. Finally, Sérgio tells us his hopes for the future of Lunit.</p><p><br></p><p><strong>Key Points:</strong></p><ul><li>A brief overview of Sérgio Pereira's background, and what led him to Lunit. </li><li>Lunit’s mission and why it’s important in fighting cancer. </li><li>How Lunit applies machine learning for cancer screening and treatment planning.</li><li>What most of the Lunit products are based on and how genetics come into play. </li><li>Why subjectivity becomes an issue when it comes to reading images in machine learning. </li><li>Sérgio explains what supervised, self-supervised, and contrastive learning is.</li><li>The lessons from machine learning work that can be applied to other types of imaging. </li><li>He tells us about Lunit’s dataset, Ocelot, and the benefits of it.</li><li>How Lunit publishes their datasets. </li><li>The challenges of getting a paper out while getting a product on the market. </li><li>Sérgio shares some important things AI-powered startups need to consider. </li><li>Where Sérgio sees the impact of Lunit in the near future. 
</li></ul><p><br></p><p><strong>Quotes:</strong></p><p>“Our mission at Lunit is to conquer cancer through AI.” — Sérgio Pereira</p><p><br></p><p>“We don’t have many products at Lunit, that’s a fact, but the ones we have, we believe they are [the] best-in-class.” — Sérgio Pereira</p><p><br></p><p>“Mistakes in healthcare can have a very big impact, so we need to be able to show and demonstrate that our products work as we promised.” — Sérgio Pereira</p><p><br></p><p>“AI can be used for good and for bad. Let’s make sure we work on the good part.” — Sérgio Pereira</p><p><br></p><p><strong>Links:</strong></p><p><a href="https://www.linkedin.com/in/pereirasrm/">Sérgio Pereira on LinkedIn</a></p><p><a href="https://scholar.google.pt/citations">Sérgio Pereira on Google Scholar</a></p><p><a href="https://twitter.com/pereirasrm">Sérgio Pereira on Twitter</a></p><p><a href="https://www.lunit.io/en">Lunit Inc.</a><br>Paper: <a href="https://arxiv.org/abs/2303.13110">OCELOT: Overlapped Cell on Tissue Dataset for Histopathology</a></p><p>Dataset: <a href="https://lunit-io.github.io/research/publications/ocelot/">OCELOT</a></p><p>Paper: <a href="https://arxiv.org/abs/2212.04690">Benchmarking Self-Supervised Learning on Diverse Pathology Datasets</a></p><p><strong>Resources for Computer Vision Teams:</strong></p><p><a href="https://www.linkedin.com/in/hdcouture/">LinkedIn</a> – Connect with Heather.</p><p><a href="https://pixelscientia.com/newsletter/">Computer Vision Insights Newsletter</a> – A biweekly newsletter to help bring the latest machine learning and computer vision research to applications in people and planetary health.</p><p><a href="http://pixelscientia.com/services/strategy-session/">Computer Vision Strategy Session</a> – Not sure how to advance your computer vision project? Get unstuck with a clear set of next steps. 
Schedule a 1 hour strategy session now to advance your project.</p><p><a href="https://pixelscientia.com/services/advisory/">Computer Vision Advisory Services</a> – Monthly advisory services to help you strategically plan your CV/ML capabilities, reduce the trial-and-error of model development, and get to market faster.</p>]]>
      </itunes:summary>
      <itunes:keywords>machine learning, computer vision, cancer, medical imaging</itunes:keywords>
      <itunes:explicit>No</itunes:explicit>
      <podcast:transcript url="https://share.transistor.fm/s/59c26a6b/transcript.txt" type="text/plain"/>
    </item>
    <item>
      <title>Smart Recycling with Tanner Cook from CleanRobotics</title>
      <itunes:episode>37</itunes:episode>
      <podcast:episode>37</podcast:episode>
      <itunes:title>Smart Recycling with Tanner Cook from CleanRobotics</itunes:title>
      <itunes:episodeType>full</itunes:episodeType>
      <guid isPermaLink="false">10ab26d3-60ed-471a-b7d1-b6b2e2583f01</guid>
      <link>https://pixelscientia.com/podcast/smart-recycling-with-tanner-cook-from-cleanrobotics/</link>
      <description>
        <![CDATA[<p>Sustainable waste disposal has been a global pain point for many decades. While the recent push toward comprehensive recycling has eased the pressure a little, there’s still much more to be done if we are to build a sustainable society. Luckily for us, the progression of AI brings new hope for feasible waste disposal, and today’s guest, the CTO and Co-Founder of CleanRobotics, Tanner Cook, is here to tell us how his company is playing its part in improving the disposal of waste, recycling, and compost.</p><p>In our conversation, we learn about CleanRobotics and why the company’s work is vital for sustainability, the ins and outs of their smart recycling product, TrashBot, and how it uses machine learning, how CleanRobotics ensures that its technology is always improving and up to date, and the impact of their AI-powered systems on sustainable waste management. Plus, Tanner offers up some noteworthy advice for other leaders of AI-powered start-ups before sharing his vision of the future of CleanRobotics.  </p><p><br></p><p><strong>Key Points: </strong></p><ul><li>Introducing the CTO and Co-Founder of CleanRobotics, Tanner Cook. </li><li>Tanner’s background and how he ended up as a co-founder. </li><li>What CleanRobotics does and why this work is important for sustainability. </li><li>Assessing the information that CleanRobotics is able to extract from its TrashBot product. </li><li>The role of machine learning in TrashBot technology. </li><li>How they gather and annotate data with TrashBot. </li><li>The challenges of training machine learning models on imagery. </li><li>How Tanner and his team improve their technology and ensure that it’s always up to date. </li><li>The way CleanRobotics measures the impact of its technology. </li><li>Tanner’s advice to other leaders of AI-powered startups. </li><li>What he’d like CleanRobotics to achieve over the next five years. 
</li></ul><p><br></p><p><strong>Quotes:</strong></p><p>“[I] found myself looking at trash cans very closely with my co-founder, Charles Yhap, and realizing, at the bin level and where people dispose of things, there were a lot of problems going on, and a lot of problems that artificial intelligence and robotics could solve.” — Tanner Cook</p><p><br></p><p>“The number of rule sets are very diverse throughout the United States and throughout the world. The rules can easily change for what is and isn't recyclable when you drive 20 minutes outside of your city.” — Tanner Cook</p><p><br></p><p>“One of our personal tenets internally for CleanRobotics is sustainability. Putting in those checks and balances to make sure that we're actually doing something good — instead of just greenwashing — is very important to us.” — Tanner Cook</p><p><br></p><p><strong>Links:</strong></p><p><a href="https://www.linkedin.com/in/tanner-cook1/">Tanner Cook on LinkedIn</a> </p><p><a href="https://cleanrobotics.com/">CleanRobotics</a> </p><p><a href="https://cleanrobotics.com/trashbot/">TrashBot</a></p><p><strong>Resources for Computer Vision Teams:</strong></p><p><a href="https://www.linkedin.com/in/hdcouture/">LinkedIn</a> – Connect with Heather.</p><p><a href="https://pixelscientia.com/newsletter/">Computer Vision Insights Newsletter</a> – A biweekly newsletter to help bring the latest machine learning and computer vision research to applications in people and planetary health.</p><p><a href="http://pixelscientia.com/services/strategy-session/">Computer Vision Strategy Session</a> – Not sure how to advance your computer vision project? Get unstuck with a clear set of next steps. 
Schedule a 1 hour strategy session now to advance your project.</p><p><a href="https://pixelscientia.com/services/advisory/">Computer Vision Advisory Services</a> – Monthly advisory services to help you strategically plan your CV/ML capabilities, reduce the trial-and-error of model development, and get to market faster.</p>]]>
      </description>
      <content:encoded>
        <![CDATA[<p>Sustainable waste disposal has been a global pain point for many decades. While the recent push toward comprehensive recycling has eased the pressure a little, there’s still much more to be done if we are to build a sustainable society. Luckily for us, the progression of AI brings new hope for feasible waste disposal, and today’s guest, the CTO and Co-Founder of CleanRobotics, Tanner Cook, is here to tell us how his company is playing its part in improving the disposal of waste, recycling, and compost.</p><p>In our conversation, we learn about CleanRobotics and why the company’s work is vital for sustainability, the ins and outs of their smart recycling product, TrashBot, and how it uses machine learning, how CleanRobotics ensures that its technology is always improving and up to date, and the impact of their AI-powered systems on sustainable waste management. Plus, Tanner offers up some noteworthy advice for other leaders of AI-powered start-ups before sharing his vision of the future of CleanRobotics.  </p><p><br></p><p><strong>Key Points: </strong></p><ul><li>Introducing the CTO and Co-Founder of CleanRobotics, Tanner Cook. </li><li>Tanner’s background and how he ended up as a co-founder. </li><li>What CleanRobotics does and why this work is important for sustainability. </li><li>Assessing the information that CleanRobotics is able to extract from its TrashBot product. </li><li>The role of machine learning in TrashBot technology. </li><li>How they gather and annotate data with TrashBot. </li><li>The challenges of training machine learning models on imagery. </li><li>How Tanner and his team improve their technology and ensure that it’s always up to date. </li><li>The way CleanRobotics measures the impact of its technology. </li><li>Tanner’s advice to other leaders of AI-powered startups. </li><li>What he’d like CleanRobotics to achieve over the next five years. 
</li></ul><p><br></p><p><strong>Quotes:</strong></p><p>“[I] found myself looking at trash cans very closely with my co-founder, Charles Yhap, and realizing, at the bin level and where people dispose of things, there were a lot of problems going on, and a lot of problems that artificial intelligence and robotics could solve.” — Tanner Cook</p><p><br></p><p>“The number of rule sets are very diverse throughout the United States and throughout the world. The rules can easily change for what is and isn't recyclable when you drive 20 minutes outside of your city.” — Tanner Cook</p><p><br></p><p>“One of our personal tenets internally for CleanRobotics is sustainability. Putting in those checks and balances to make sure that we're actually doing something good — instead of just greenwashing — is very important to us.” — Tanner Cook</p><p><br></p><p><strong>Links:</strong></p><p><a href="https://www.linkedin.com/in/tanner-cook1/">Tanner Cook on LinkedIn</a> </p><p><a href="https://cleanrobotics.com/">CleanRobotics</a> </p><p><a href="https://cleanrobotics.com/trashbot/">TrashBot</a></p><p><strong>Resources for Computer Vision Teams:</strong></p><p><a href="https://www.linkedin.com/in/hdcouture/">LinkedIn</a> – Connect with Heather.</p><p><a href="https://pixelscientia.com/newsletter/">Computer Vision Insights Newsletter</a> – A biweekly newsletter to help bring the latest machine learning and computer vision research to applications in people and planetary health.</p><p><a href="http://pixelscientia.com/services/strategy-session/">Computer Vision Strategy Session</a> – Not sure how to advance your computer vision project? Get unstuck with a clear set of next steps. 
Schedule a 1 hour strategy session now to advance your project.</p><p><a href="https://pixelscientia.com/services/advisory/">Computer Vision Advisory Services</a> – Monthly advisory services to help you strategically plan your CV/ML capabilities, reduce the trial-and-error of model development, and get to market faster.</p>]]>
      </content:encoded>
      <pubDate>Mon, 19 Jun 2023 06:00:00 -0400</pubDate>
      <author>Heather D. Couture</author>
      <enclosure url="https://2.gum.fm/op3.dev/e/pdcn.co/e/pdst.fm/e/dts.podtrac.com/redirect.mp3/media.transistor.fm/4bb4a034/04204a5a.mp3" length="25569557" type="audio/mpeg"/>
      <itunes:author>Heather D. Couture</itunes:author>
      <itunes:image href="https://img.transistor.fm/LAJx7d63kJyb6YZAzLZE5zh2pvkLj62au5VlYQ1yh9M/rs:fill:0:0:1/w:1400/h:1400/q:60/mb:500000/aHR0cHM6Ly9pbWct/dXBsb2FkLXByb2R1/Y3Rpb24udHJhbnNp/c3Rvci5mbS9lcGlz/b2RlLzEzODU0MDIv/MTY4Njg1Mjc4Ni1h/cnR3b3JrLmpwZw.jpg"/>
      <itunes:duration>1060</itunes:duration>
      <itunes:summary>
        <![CDATA[<p>Sustainable waste disposal has been a global pain point for many decades. While the recent push toward comprehensive recycling has eased the pressure a little, there’s still much more to be done if we are to build a sustainable society. Luckily for us, the progression of AI brings new hope for feasible waste disposal, and today’s guest, the CTO and Co-Founder of CleanRobotics, Tanner Cook, is here to tell us how his company is playing its part in improving the disposal of waste, recycling, and compost.</p><p>In our conversation, we learn about CleanRobotics and why the company’s work is vital for sustainability, the ins and outs of their smart recycling product, TrashBot, and how it uses machine learning, how CleanRobotics ensures that its technology is always improving and up to date, and the impact of their AI-powered systems on sustainable waste management. Plus, Tanner offers up some noteworthy advice for other leaders of AI-powered start-ups before sharing his vision of the future of CleanRobotics.  </p><p><br></p><p><strong>Key Points: </strong></p><ul><li>Introducing the CTO and Co-Founder of CleanRobotics, Tanner Cook. </li><li>Tanner’s background and how he ended up as a co-founder. </li><li>What CleanRobotics does and why this work is important for sustainability. </li><li>Assessing the information that CleanRobotics is able to extract from its TrashBot product. </li><li>The role of machine learning in TrashBot technology. </li><li>How they gather and annotate data with TrashBot. </li><li>The challenges of training machine learning models on imagery. </li><li>How Tanner and his team improve their technology and ensure that it’s always up to date. </li><li>The way CleanRobotics measures the impact of its technology. </li><li>Tanner’s advice to other leaders of AI-powered startups. </li><li>What he’d like CleanRobotics to achieve over the next five years. 
</li></ul><p><br></p><p><strong>Quotes:</strong></p><p>“[I] found myself looking at trash cans very closely with my co-founder, Charles Yhap, and realizing, at the bin level and where people dispose of things, there were a lot of problems going on, and a lot of problems that artificial intelligence and robotics could solve.” — Tanner Cook</p><p><br></p><p>“The number of rule sets are very diverse throughout the United States and throughout the world. The rules can easily change for what is and isn't recyclable when you drive 20 minutes outside of your city.” — Tanner Cook</p><p><br></p><p>“One of our personal tellers internally for CleanRobotics is sustainability. Putting in those checks and balances to make sure that we're actually doing something good - instead of just greenwashing - is very important to us.” — Tanner Cook</p><p><br></p><p><strong>Links:</strong></p><p><a href="https://www.linkedin.com/in/tanner-cook1/">Tanner Cook on LinkedIn</a> </p><p><a href="https://cleanrobotics.com/">CleanRobotics</a> </p><p><a href="https://cleanrobotics.com/trashbot/">TrashBot</a></p><p><strong>Resources for Computer Vision Teams:</strong></p><p><a href="https://www.linkedin.com/in/hdcouture/">LinkedIn</a> – Connect with Heather.</p><p><a href="https://pixelscientia.com/newsletter/">Computer Vision Insights Newsletter</a> – A biweekly newsletter to help bring the latest machine learning and computer vision research to applications in people and planetary health.</p><p><a href="http://pixelscientia.com/services/strategy-session/">Computer Vision Strategy Session</a> – Not sure how to advance your computer vision project? Get unstuck with a clear set of next steps. 
Schedule a 1 hour strategy session now to advance your project.</p><p><a href="https://pixelscientia.com/services/advisory/">Computer Vision Advisory Services</a> – Monthly advisory services to help you strategically plan your CV/ML capabilities, reduce the trial-and-error of model development, and get to market faster.</p>]]>
      </itunes:summary>
      <itunes:keywords>machine learning, computer vision, recycling, sustainability</itunes:keywords>
      <itunes:explicit>No</itunes:explicit>
      <podcast:transcript url="https://share.transistor.fm/s/4bb4a034/transcript.txt" type="text/plain"/>
    </item>
    <item>
      <title>Accelerating Medicinal Chemistry with Aaron Morris from PostEra</title>
      <itunes:episode>36</itunes:episode>
      <podcast:episode>36</podcast:episode>
      <itunes:title>Accelerating Medicinal Chemistry with Aaron Morris from PostEra</itunes:title>
      <itunes:episodeType>full</itunes:episodeType>
      <guid isPermaLink="false">7d4dadcf-3c8f-4c76-9900-e68ae5f13435</guid>
      <link>https://pixelscientia.com/podcast/accelerating-medicinal-chemistry-with-aaron-morris-from-postera/</link>
      <description>
        <![CDATA[<p>In the traditional paradigm, it can take up to ten years for a drug to come to market. For this episode, I am joined by guest Aaron Morris, Co-founder and CEO of PostEra, to talk about using AI to accelerate medicinal chemistry and bring cures to patients faster than ever before.</p><p>Aaron breaks down the medicinal chemistry process and explains how PostEra applies machine learning to drug discovery. The data landscape within drug discovery is particularly challenging and today, we learn about PostEra’s approach to gathering data, the data sets they build from, and how they find new uses for project-specific data. Hear about the importance of model interpretability and how to get a competitive advantage as an AI-powered startup.</p><p><br></p><p><strong>Key Points:</strong></p><ul><li>Aaron Morris’ background in mathematics and how it led to the creation of PostEra.</li><li>The scientific disciplines involved in developing a drug.</li><li>PostEra’s focus: building the world’s most advanced ML platform for medicinal chemistry.</li><li>Aaron explains the process of medicinal chemistry.</li><li>How PostEra applies machine learning to the drug discovery process.</li><li>The challenging data landscape within drug discovery and the data sets PostEra builds from.</li><li>PostEra’s approach to gathering data, and how they use it.</li><li>The challenge of finding new uses for project-specific data.</li><li>How PostEra validates its models.</li><li>Why PostEra makes its models less black box and how they go about it.</li><li>The importance of model interpretability and how PostEra develops interpretable ML.</li><li>Aaron’s advice for other leaders of AI-powered startups.</li><li>His vision for PostEra’s impact in the next three to five years. 
</li></ul><p><br></p><p><strong>Quotes:</strong></p><p>“Though being reasonably competent on the machine learning side, I had a very, very steep learning curve when it came to getting up to speed with drug discovery chemistry and the applications of AI in that domain.” — Aaron Morris</p><p><br></p><p>“Drug discovery is going from biology to chemistry to medicine and PostEra squarely focuses, at least for now, on the chemistry angle. Our main focus is to build the world’s most advanced machine learning platform for what is referred to as medicinal chemistry.” — Aaron Morris</p><p><br></p><p>“PostEra is really the first AI company to pioneer machine learning across all three stages of how to design a molecule, how to make the molecule, and how to select the optimal set of molecules to test.” — Aaron Morris</p><p><br></p><p>“There is a lot of project-specific data that gets generated, and often what that means for PostEra is we’re having to be very inventive about how we try to get the most out of data even if it is not relevant.” — Aaron Morris</p><p><br></p><p>“If you want to build defensibility as a company, you have to have more than just innovations on model architecture.” — Aaron Morris</p><p><br></p><p>“Your typical drug today is taking anywhere between eight to ten years to come to market and obviously, we want to really accelerate that.” — Aaron Morris</p><p><br></p><p><strong>Links:</strong></p><p><a href="https://www.linkedin.com/in/aaron-morris-9922277b">Aaron Morris on LinkedIn</a></p><p><a href="https://twitter.com/AaronPMorris">Aaron Morris on Twitter</a></p><p><a href="https://postera.ai/">PostEra</a></p><p><a href="https://twitter.com/PostEra_AI">PostEra on Twitter</a></p><p><strong>Resources for Computer Vision Teams:</strong></p><p><a href="https://www.linkedin.com/in/hdcouture/">LinkedIn</a> – Connect with Heather.</p><p><a href="https://pixelscientia.com/newsletter/">Computer Vision Insights Newsletter</a> – A biweekly newsletter to help bring the 
latest machine learning and computer vision research to applications in people and planetary health.</p><p><a href="http://pixelscientia.com/services/strategy-session/">Computer Vision Strategy Session</a> – Not sure how to advance your computer vision project? Get unstuck with a clear set of next steps. Schedule a 1 hour strategy session now to advance your project.</p><p><a href="https://pixelscientia.com/services/advisory/">Computer Vision Advisory Services</a> – Monthly advisory services to help you strategically plan your CV/ML capabilities, reduce the trial-and-error of model development, and get to market faster.</p>]]>
      </description>
      <content:encoded>
        <![CDATA[<p>In the traditional paradigm, it can take up to ten years for a drug to come to market. For this episode, I am joined by guest Aaron Morris, Co-founder and CEO of PostEra, to talk about using AI to accelerate medicinal chemistry and bring cures to patients faster than ever before.</p><p>Aaron breaks down the medicinal chemistry process and explains how PostEra applies machine learning to drug discovery. The data landscape within drug discovery is particularly challenging and today, we learn about PostEra’s approach to gathering data, the data sets they build from, and how they find new uses for project-specific data. Hear about the importance of model interpretability and how to get a competitive advantage as an AI-powered startup.</p><p><br></p><p><strong>Key Points:</strong></p><ul><li>Aaron Morris’ background in mathematics and how it led to the creation of PostEra.</li><li>The scientific disciplines involved in developing a drug.</li><li>PostEra’s focus: building the world’s most advanced ML platform for medicinal chemistry.</li><li>Aaron explains the process of medicinal chemistry.</li><li>How PostEra applies machine learning to the drug discovery process.</li><li>The challenging data landscape within drug discovery and the data sets PostEra builds from.</li><li>PostEra’s approach to gathering data, and how they use it.</li><li>The challenge of finding new uses for project-specific data.</li><li>How PostEra validates its models.</li><li>Why PostEra makes its models less black box and how they go about it.</li><li>The importance of model interpretability and how PostEra develops interpretable ML.</li><li>Aaron’s advice for other leaders of AI-powered startups.</li><li>His vision for PostEra’s impact in the next three to five years. 
</li></ul><p><br></p><p><strong>Quotes:</strong></p><p>“Though being reasonably competent on the machine learning side, I had a very, very steep learning curve when it came to getting up to speed with drug discovery chemistry and the applications of AI in that domain.” — Aaron Morris</p><p><br></p><p>“Drug discovery is going from biology to chemistry to medicine and PostEra squarely focuses, at least for now, on the chemistry angle. Our main focus is to build the world’s most advanced machine learning platform for what is referred to as medicinal chemistry.” — Aaron Morris</p><p><br></p><p>“PostEra is really the first AI company to pioneer machine learning across all three stages of how to design a molecule, how to make the molecule, and how to select the optimal set of molecules to test.” — Aaron Morris</p><p><br></p><p>“There is a lot of project-specific data that gets generated, and often what that means for PostEra is we’re having to be very inventive about how we try to get the most out of data even if it is not relevant.” — Aaron Morris</p><p><br></p><p>“If you want to build defensibility as a company, you have to have more than just innovations on model architecture.” — Aaron Morris</p><p><br></p><p>“Your typical drug today is taking anywhere between eight to ten years to come to market and obviously, we want to really accelerate that.” — Aaron Morris</p><p><br></p><p><strong>Links:</strong></p><p><a href="https://www.linkedin.com/in/aaron-morris-9922277b">Aaron Morris on LinkedIn</a></p><p><a href="https://twitter.com/AaronPMorris">Aaron Morris on Twitter</a></p><p><a href="https://postera.ai/">PostEra</a></p><p><a href="https://twitter.com/PostEra_AI">PostEra on Twitter</a></p><p><strong>Resources for Computer Vision Teams:</strong></p><p><a href="https://www.linkedin.com/in/hdcouture/">LinkedIn</a> – Connect with Heather.</p><p><a href="https://pixelscientia.com/newsletter/">Computer Vision Insights Newsletter</a> – A biweekly newsletter to help bring the 
latest machine learning and computer vision research to applications in people and planetary health.</p><p><a href="http://pixelscientia.com/services/strategy-session/">Computer Vision Strategy Session</a> – Not sure how to advance your computer vision project? Get unstuck with a clear set of next steps. Schedule a 1 hour strategy session now to advance your project.</p><p><a href="https://pixelscientia.com/services/advisory/">Computer Vision Advisory Services</a> – Monthly advisory services to help you strategically plan your CV/ML capabilities, reduce the trial-and-error of model development, and get to market faster.</p>]]>
      </content:encoded>
      <pubDate>Mon, 12 Jun 2023 06:00:00 -0400</pubDate>
      <author>Heather D. Couture</author>
      <enclosure url="https://2.gum.fm/op3.dev/e/pdcn.co/e/pdst.fm/e/dts.podtrac.com/redirect.mp3/media.transistor.fm/e3b18554/d85c7d82.mp3" length="22341796" type="audio/mpeg"/>
      <itunes:author>Heather D. Couture</itunes:author>
      <itunes:image href="https://img.transistor.fm/3hmQSOwYhzJ9Kt1MQ29xk-lpUCdVzZsTrlkqL9DN8DQ/rs:fill:0:0:1/w:1400/h:1400/q:60/mb:500000/aHR0cHM6Ly9pbWct/dXBsb2FkLXByb2R1/Y3Rpb24udHJhbnNp/c3Rvci5mbS9lcGlz/b2RlLzEzMzM2MDgv/MTY4MzkxODY3OC1h/cnR3b3JrLmpwZw.jpg"/>
      <itunes:duration>1392</itunes:duration>
      <itunes:summary>
        <![CDATA[<p>In the traditional paradigm, it can take up to ten years for a drug to come to market. For this episode, I am joined by guest Aaron Morris, Co-founder and CEO of PostEra, to talk about using AI to accelerate medicinal chemistry and bring cures to patients faster than ever before.</p><p>Aaron breaks down the medicinal chemistry process and explains how PostEra applies machine learning to drug discovery. The data landscape within drug discovery is particularly challenging and today, we learn about PostEra’s approach to gathering data, the data sets they build from, and how they find new uses for project-specific data. Hear about the importance of model interpretability and how to get a competitive advantage as an AI-powered startup.</p><p><br></p><p><strong>Key Points:</strong></p><ul><li>Aaron Morris’ background in mathematics and how it led to the creation of PostEra.</li><li>The scientific disciplines involved in developing a drug.</li><li>PostEra’s focus: building the world’s most advanced ML platform for medicinal chemistry.</li><li>Aaron explains the process of medicinal chemistry.</li><li>How PostEra applies machine learning to the drug discovery process.</li><li>The challenging data landscape within drug discovery and the data sets PostEra builds from.</li><li>PostEra’s approach to gathering data, and how they use it.</li><li>The challenge of finding new uses for project-specific data.</li><li>How PostEra validates its models.</li><li>Why PostEra makes its models less black box and how they go about it.</li><li>The importance of model interpretability and how PostEra develops interpretable ML.</li><li>Aaron’s advice for other leaders of AI-powered startups.</li><li>His vision for PostEra’s impact in the next three to five years. 
</li></ul><p><br></p><p><strong>Quotes:</strong></p><p>“Though being reasonably competent on the machine learning side, I had a very, very steep learning curve when it came to getting up to speed with drug discovery chemistry and the applications of AI in that domain.” — Aaron Morris</p><p><br></p><p>“Drug discovery is going from biology to chemistry to medicine and PostEra squarely focuses, at least for now, on the chemistry angle. Our main focus is to build the world’s most advanced machine learning platform for what is referred to as medicinal chemistry.” — Aaron Morris</p><p><br></p><p>“PostEra is really the first AI company to pioneer machine learning across all three stages of how to design a molecule, how to make the molecule, and how to select the optimal set of molecules to test.” — Aaron Morris</p><p><br></p><p>“There is a lot of project-specific data that gets generated, and often what that means for PostEra is we’re having to be very inventive about how we try to get the most out of data even if it is not relevant.” — Aaron Morris</p><p><br></p><p>“If you want to build defensibility as a company, you have to have more than just innovations on model architecture.” — Aaron Morris</p><p><br></p><p>“Your typical drug today is taking anywhere between eight to ten years to come to market and obviously, we want to really accelerate that.” — Aaron Morris</p><p><br></p><p><strong>Links:</strong></p><p><a href="https://www.linkedin.com/in/aaron-morris-9922277b">Aaron Morris on LinkedIn</a></p><p><a href="https://twitter.com/AaronPMorris">Aaron Morris on Twitter</a></p><p><a href="https://postera.ai/">PostEra</a></p><p><a href="https://twitter.com/PostEra_AI">PostEra on Twitter</a></p><p><strong>Resources for Computer Vision Teams:</strong></p><p><a href="https://www.linkedin.com/in/hdcouture/">LinkedIn</a> – Connect with Heather.</p><p><a href="https://pixelscientia.com/newsletter/">Computer Vision Insights Newsletter</a> – A biweekly newsletter to help bring the 
latest machine learning and computer vision research to applications in people and planetary health.</p><p><a href="http://pixelscientia.com/services/strategy-session/">Computer Vision Strategy Session</a> – Not sure how to advance your computer vision project? Get unstuck with a clear set of next steps. Schedule a 1 hour strategy session now to advance your project.</p><p><a href="https://pixelscientia.com/services/advisory/">Computer Vision Advisory Services</a> – Monthly advisory services to help you strategically plan your CV/ML capabilities, reduce the trial-and-error of model development, and get to market faster.</p>]]>
      </itunes:summary>
      <itunes:keywords>AI, startups, business, machine learning, computer vision, deep learning, impact, sustainability, healthcare, medical</itunes:keywords>
      <itunes:explicit>No</itunes:explicit>
      <podcast:transcript url="https://share.transistor.fm/s/e3b18554/transcript.txt" type="text/plain"/>
    </item>
    <item>
      <title>Autonomous Drones for Farming with Amr Omar from Precision AI</title>
      <itunes:episode>35</itunes:episode>
      <podcast:episode>35</podcast:episode>
      <itunes:title>Autonomous Drones for Farming with Amr Omar from Precision AI</itunes:title>
      <itunes:episodeType>full</itunes:episodeType>
      <guid isPermaLink="false">05c462ed-ccb7-43f6-b4ee-6a55f8c3558f</guid>
      <link>https://pixelscientia.com/podcast/autonomous-drones-for-farming-with-amr-omar-from-precision-ai/</link>
      <description>
        <![CDATA[<p>I am excited to welcome the Head of AI at Precision AI, Amr Omar. Precision AI has recently taken to agriculture by using drone technology to deliver precise herbicide doses to crops that are in need.</p><p>In today’s episode, Amr explains why this technology is crucial for the future of farming and how machine learning factors into the process. From being at the mercy of the weather to not being able to distinguish between good and bad crops when they are seedlings, there are many challenges involved with using drones and AI technology for farming, and my guest lays them all out whilst explaining the solutions that he and his team have come up with.</p><p>You’ll learn about Amr’s process for developing new machine learning products and features, the non-negotiables he prioritizes in state-of-the-art reviews, what he looks for when building a successful team, his advice for other AI startup leaders, and so much more!</p><p><br></p><p><strong>Key Points:</strong></p><ul><li>Introducing Amr Omar as he explains how he ended up as Head of AI at Precision AI.</li><li>What Precision AI does and why this work is so important for farming. </li><li>The role of machine learning in Precision AI’s technology. </li><li>Challenges that arise when using drones for farming, and how Amr’s team overcomes them. </li><li>How Amr makes the drone models generalizable without sacrificing other restrictions. </li><li>A look at his process for developing a new machine-learning product or feature. </li><li>What Amr and his team look for and prioritize when doing state-of-the-art reviews.</li><li>The approaches to recruiting and onboarding that have been successful in building his team. </li><li>How he measures the impact of his drone technology: the field test. </li><li>Amr shares some advice for other AI-powered startup leaders.  </li><li>How he sees Precision AI impacting the market in the next three to five years. 
</li></ul><p><br></p><p><strong>Quotes:</strong></p><p>“What we offer here at Precision AI is putting only what needs to be sprayed in real-time speed, using drones to kill those that are unwanted, which eventually saves a lot of money. At the same time, it increases the value of the crops coming out of that process.” — Amr Omar</p><p><br></p><p>“The flexibility to pivot within the development of a certain feature is what empowers any team that's developing AI-driven applications or products to scale and succeed without facing any unexpected challenges.” — Amr Omar</p><p><br></p><p>“Most [problems have] solutions. It's just about how much you are willing to invest in that solution versus the value you're going to get out of that.” — Amr Omar</p><p><br></p><p>“We all have this dream big mentality at Precision AI, from the leadership to the junior engineers. We all wish to make something big happen with what we are doing. To be able to achieve that, you need a team of believers.” — Amr Omar</p><p><br></p><p>“Bet on the process [and] not on the product while working in machine learning or in AI-driven teams. The process is way more important than the product. 
The product will come at the end of the day.” — Amr Omar</p><p><br></p><p><strong>Links:</strong></p><p><a href="https://www.linkedin.com/in/amr-elsehemy-04199671/">Amr Omar on LinkedIn</a></p><p><a href="https://twitter.com/amr_elsehemy">Amr Omar on Twitter</a>  </p><p><a href="mailto:amr@precision.ai">Amr Omar Email</a></p><p><a href="https://www.precision.ai/">Precision AI</a></p><p><strong>Resources for Computer Vision Teams:</strong></p><p><a href="https://www.linkedin.com/in/hdcouture/">LinkedIn</a> – Connect with Heather.</p><p><a href="https://pixelscientia.com/newsletter/">Computer Vision Insights Newsletter</a> – A biweekly newsletter to help bring the latest machine learning and computer vision research to applications in people and planetary health.</p><p><a href="http://pixelscientia.com/services/strategy-session/">Computer Vision Strategy Session</a> – Not sure how to advance your computer vision project? Get unstuck with a clear set of next steps. Schedule a 1 hour strategy session now to advance your project.</p><p><a href="https://pixelscientia.com/services/advisory/">Computer Vision Advisory Services</a> – Monthly advisory services to help you strategically plan your CV/ML capabilities, reduce the trial-and-error of model development, and get to market faster.</p>]]>
      </description>
      <content:encoded>
        <![CDATA[<p>I am excited to welcome the Head of AI at Precision AI, Amr Omar. Precision AI has recently taken to agriculture by using drone technology to deliver precise herbicide doses to crops that are in need.</p><p>In today’s episode, Amr explains why this technology is crucial for the future of farming and how machine learning factors into the process. From being at the mercy of the weather to not being able to distinguish between good and bad crops when they are seedlings, there are many challenges involved with using drones and AI technology for farming, and my guest lays them all out whilst explaining the solutions that he and his team have come up with.</p><p>You’ll learn about Amr’s process for developing new machine learning products and features, the non-negotiables he prioritizes in state-of-the-art reviews, what he looks for when building a successful team, his advice for other AI startup leaders, and so much more!</p><p><br></p><p><strong>Key Points:</strong></p><ul><li>Introducing Amr Omar as he explains how he ended up as Head of AI at Precision AI.</li><li>What Precision AI does and why this work is so important for farming. </li><li>The role of machine learning in Precision AI’s technology. </li><li>Challenges that arise when using drones for farming, and how Amr’s team overcomes them. </li><li>How Amr makes the drone models generalizable without sacrificing other restrictions. </li><li>A look at his process for developing a new machine-learning product or feature. </li><li>What Amr and his team look for and prioritize when doing state-of-the-art reviews.</li><li>The approaches to recruiting and onboarding that have been successful in building his team. </li><li>How he measures the impact of his drone technology: the field test. </li><li>Amr shares some advice for other AI-powered startup leaders.  </li><li>How he sees Precision AI impacting the market in the next three to five years. 
</li></ul><p><br></p><p><strong>Quotes:</strong></p><p>“What we offer here at Precision AI is putting only what needs to be sprayed in real-time speed, using drones to kill those that are unwanted, which eventually saves a lot of money. At the same time, it increases the value of the crops coming out of that process.” — Amr Omar</p><p><br></p><p>“The flexibility to pivot within the development of a certain feature is what empowers any team that's developing AI-driven applications or products to scale and succeed without facing any unexpected challenges.” — Amr Omar</p><p><br></p><p>“Most [problems have] solutions. It's just about how much you are willing to invest in that solution versus the value you're going to get out of that.” — Amr Omar</p><p><br></p><p>“We all have this dream big mentality at Precision AI, from the leadership to the junior engineers. We all wish to make something big happen with what we are doing. To be able to achieve that, you need a team of believers.” — Amr Omar</p><p><br></p><p>“Bet on the process [and] not on the product while working in machine learning or in AI-driven teams. The process is way more important than the product. 
The product will come at the end of the day.” — Amr Omar</p><p><br></p><p><strong>Links:</strong></p><p><a href="https://www.linkedin.com/in/amr-elsehemy-04199671/">Amr Omar on LinkedIn</a></p><p><a href="https://twitter.com/amr_elsehemy">Amr Omar on Twitter</a>  </p><p><a href="mailto:amr@precision.ai">Amr Omar Email</a></p><p><a href="https://www.precision.ai/">Precision AI</a></p><p><strong>Resources for Computer Vision Teams:</strong></p><p><a href="https://www.linkedin.com/in/hdcouture/">LinkedIn</a> – Connect with Heather.</p><p><a href="https://pixelscientia.com/newsletter/">Computer Vision Insights Newsletter</a> – A biweekly newsletter to help bring the latest machine learning and computer vision research to applications in people and planetary health.</p><p><a href="http://pixelscientia.com/services/strategy-session/">Computer Vision Strategy Session</a> – Not sure how to advance your computer vision project? Get unstuck with a clear set of next steps. Schedule a 1 hour strategy session now to advance your project.</p><p><a href="https://pixelscientia.com/services/advisory/">Computer Vision Advisory Services</a> – Monthly advisory services to help you strategically plan your CV/ML capabilities, reduce the trial-and-error of model development, and get to market faster.</p>]]>
      </content:encoded>
      <pubDate>Mon, 05 Jun 2023 06:00:00 -0400</pubDate>
      <author>Heather D. Couture</author>
      <enclosure url="https://2.gum.fm/op3.dev/e/pdcn.co/e/pdst.fm/e/dts.podtrac.com/redirect.mp3/media.transistor.fm/9d1a8ace/e9faaef0.mp3" length="39313051" type="audio/mpeg"/>
      <itunes:author>Heather D. Couture</itunes:author>
      <itunes:image href="https://img.transistor.fm/oX_m6LNpDRg4u6gy2lugyIYFOfgdHoQMjALzkk48XwQ/rs:fill:0:0:1/w:1400/h:1400/q:60/mb:500000/aHR0cHM6Ly9pbWct/dXBsb2FkLXByb2R1/Y3Rpb24udHJhbnNp/c3Rvci5mbS9lcGlz/b2RlLzEzMzM2MDUv/MTY4MzkyNDc0Ni1h/cnR3b3JrLmpwZw.jpg"/>
      <itunes:duration>1634</itunes:duration>
      <itunes:summary>
        <![CDATA[<p>I am excited to welcome the Head of AI at Precision AI, Amr Omar. Precision AI has recently taken to agriculture by using drone technology to deliver precise herbicide doses to crops that are in need.</p><p>In today’s episode, Amr explains why this technology is crucial for the future of farming and how machine learning factors into the process. From being at the mercy of the weather to not being able to distinguish between good and bad crops when they are seedlings, there are many challenges involved with using drones and AI technology for farming, and my guest lays them all out whilst explaining the solutions that he and his team have come up with.</p><p>You’ll learn about Amr’s process for developing new machine learning products and features, the non-negotiables he prioritizes in state-of-the-art reviews, what he looks for when building a successful team, his advice for other AI startup leaders, and so much more!</p><p><br></p><p><strong>Key Points:</strong></p><ul><li>Introducing Amr Omar as he explains how he ended up as Head of AI at Precision AI.</li><li>What Precision AI does and why this work is so important for farming. </li><li>The role of machine learning in Precision AI’s technology. </li><li>Challenges that arise when using drones for farming, and how Amr’s team overcomes them. </li><li>How Amr makes the drone models generalizable without sacrificing other restrictions. </li><li>A look at his process for developing a new machine-learning product or feature. </li><li>What Amr and his team look for and prioritize when doing state-of-the-art reviews.</li><li>The approaches to recruiting and onboarding that have been successful in building his team. </li><li>How he measures the impact of his drone technology: the field test. </li><li>Amr shares some advice for other AI-powered startup leaders.  </li><li>How he sees Precision AI impacting the market in the next three to five years. 
</li></ul><p><br></p><p><strong>Quotes:</strong></p><p>“What we offer here at Precision AI is putting only what needs to be sprayed in real-time speed, using drones to kill those that are unwanted, which eventually saves a lot of money. At the same time, it increases the value of the crops coming out of that process.” — Amr Omar</p><p><br></p><p>“The flexibility to pivot within the development of a certain feature is what empowers any team that's developing AI-driven applications or products to scale and succeed without facing any unexpected challenges.” — Amr Omar</p><p><br></p><p>“Most [problems have] solutions. It's just about how much you are willing to invest in that solution versus the value you're going to get out of that.” — Amr Omar</p><p><br></p><p>“We all have this dream big mentality at Precision AI, from the leadership to the junior engineers. We all wish to make something big happen with what we are doing. To be able to achieve that, you need a team of believers.” — Amr Omar</p><p><br></p><p>“Bet on the process [and] not on the product while working in machine learning or in AI-driven teams. The process is way more important than the product. 
The product will come at the end of the day.” — Amr Omar</p><p><br></p><p><strong>Links:</strong></p><p><a href="https://www.linkedin.com/in/amr-elsehemy-04199671/">Amr Omar on LinkedIn</a></p><p><a href="https://twitter.com/amr_elsehemy">Amr Omar on Twitter</a>  </p><p><a href="mailto:amr@precision.ai">Amr Omar Email</a></p><p><a href="https://www.precision.ai/">Precision AI</a></p><p><strong>Resources for Computer Vision Teams:</strong></p><p><a href="https://www.linkedin.com/in/hdcouture/">LinkedIn</a> – Connect with Heather.</p><p><a href="https://pixelscientia.com/newsletter/">Computer Vision Insights Newsletter</a> – A biweekly newsletter to help bring the latest machine learning and computer vision research to applications in people and planetary health.</p><p><a href="http://pixelscientia.com/services/strategy-session/">Computer Vision Strategy Session</a> – Not sure how to advance your computer vision project? Get unstuck with a clear set of next steps. Schedule a 1 hour strategy session now to advance your project.</p><p><a href="https://pixelscientia.com/services/advisory/">Computer Vision Advisory Services</a> – Monthly advisory services to help you strategically plan your CV/ML capabilities, reduce the trial-and-error of model development, and get to market faster.</p>]]>
      </itunes:summary>
      <itunes:keywords>AI, startups, business, machine learning, computer vision, deep learning, impact, sustainability, healthcare, medical</itunes:keywords>
      <itunes:explicit>No</itunes:explicit>
      <podcast:transcript url="https://share.transistor.fm/s/9d1a8ace/transcript.txt" type="text/plain"/>
    </item>
    <item>
      <title>Activity Recognition for Healthcare with Harro Stokman from Kepler Vision</title>
      <itunes:episode>34</itunes:episode>
      <podcast:episode>34</podcast:episode>
      <itunes:title>Activity Recognition for Healthcare with Harro Stokman from Kepler Vision</itunes:title>
      <itunes:episodeType>full</itunes:episodeType>
      <guid isPermaLink="false">ba3951c5-22fc-46de-a9ad-370fbc9e924c</guid>
      <link>https://pixelscientia.com/podcast/activity-recognition-for-healthcare-with-harro-stokman-from-kepler-vision/</link>
      <description>
        <![CDATA[<p>Today on Impact AI we welcome the CEO and Founder of AI healthcare company Kepler Vision, Harro Stokman. Kepler Vision is using computer vision to aid the healthcare world in recognizing falls in elderly patients, and Harro explains why the specificity of this focus is such a strength for the company.</p><p>Using computer vision and an ever-growing dataset to perfectly detect situations where personnel is needed is no small feat, and answers the staffing issues often associated with care facilities during the night. In our chat, Harro explains some of the technical aspects of the software and the major improvements he has overseen recently before going into some connected topics such as privacy concerns, hiring practices, and validating the accuracy of the models. Harro is also kind enough to offer some general comments and advice regarding AI startups, and the areas he believes are most vital for founders to attend to.</p><p>So if you would like to hear about a great practical application of AI in the healthcare space, and some thoughts from a leader making waves in some uncharted waters, be sure to listen in with us!</p><p><br></p><p><strong>Key Points:</strong></p><ul><li>Harro talks about his academic and professional background and his companies before Kepler Vision. </li><li>The specific problems that Kepler Vision is solving.  </li><li>Understanding the role of machine learning in Kepler Vision's service. </li><li>Harro shares the biggest challenges that he and his company have faced.  </li><li>The task of building trust and the hiring practices that contribute to this.</li><li>Validating the accuracy of models; Harro unpacks the labor-intensive process. </li><li>The improvements that have been made to the software through iterative updates.</li><li>Measuring the impact of the software; Harro talks about customer satisfaction. </li><li>Advice from Harro to AI startups about hiring and focus. 
</li><li>Harro shares his vision for the next five years at Kepler Vision and where to find them online.</li></ul><p><br></p><p><strong>Quotes:</strong></p><p>“Over time, we added more and more examples to our training sets, and we are now at a phase where our software pretty much works out of the box actually.” — Harro Stokman</p><p><br></p><p>“So in the field of elderly care and hospital care, our software can look after the wellbeing of elderly clients and that is all we do and we do nothing more. But what we do, we do incredibly [well].” — Harro Stokman</p><p><br></p><p>“We have stayed faithful to the healthcare vertical. So my advice would be to focus.” — Harro Stokman</p><p><br></p><p><strong>Links:</strong></p><p><a href="https://www.linkedin.com/in/harro?originalSubdomain=nl">Harro Stokman on LinkedIn</a></p><p><a href="https://keplervision.eu/en/">Kepler Vision</a></p><p><strong>Resources for Computer Vision Teams:</strong></p><p><a href="https://www.linkedin.com/in/hdcouture/">LinkedIn</a> – Connect with Heather.</p><p><a href="https://pixelscientia.com/newsletter/">Computer Vision Insights Newsletter</a> – A biweekly newsletter to help bring the latest machine learning and computer vision research to applications in people and planetary health.</p><p><a href="http://pixelscientia.com/services/strategy-session/">Computer Vision Strategy Session</a> – Not sure how to advance your computer vision project? Get unstuck with a clear set of next steps. Schedule a 1 hour strategy session now to advance your project.</p><p><a href="https://pixelscientia.com/services/advisory/">Computer Vision Advisory Services</a> – Monthly advisory services to help you strategically plan your CV/ML capabilities, reduce the trial-and-error of model development, and get to market faster.</p>]]>
      </description>
      <content:encoded>
        <![CDATA[<p>Today on Impact AI we welcome the CEO and Founder of AI healthcare company Kepler Vision, Harro Stokman. Kepler Vision is using computer vision to aid the healthcare world in recognizing falls in elderly patients, and Harro explains why the specificity of this focus is such a strength for the company.</p><p>Using computer vision and an ever-growing dataset to perfectly detect situations where personnel is needed is no small feat, and answers the staffing issues often associated with care facilities during the night. In our chat, Harro explains some of the technical aspects of the software and the major improvements he has overseen recently before going into some connected topics such as privacy concerns, hiring practices, and validating the accuracy of the models. Harro is also kind enough to offer some general comments and advice regarding AI startups, and the areas he believes are most vital for founders to attend to.</p><p>So if you would like to hear about a great practical application of AI in the healthcare space, and some thoughts from a leader making waves in some uncharted waters, be sure to listen in with us!</p><p><br></p><p><strong>Key Points:</strong></p><ul><li>Harro talks about his academic and professional background and his companies before Kepler Vision. </li><li>The specific problems that Kepler Vision is solving.  </li><li>Understanding the role of machine learning in Kepler Vision's service. </li><li>Harro shares the biggest challenges that he and his company have faced.  </li><li>The task of building trust and the hiring practices that contribute to this.</li><li>Validating the accuracy of models; Harro unpacks the labor-intensive process. </li><li>The improvements that have been made to the software through iterative updates.</li><li>Measuring the impact of the software; Harro talks about customer satisfaction. </li><li>Advice from Harro to AI startups about hiring and focus. 
</li><li>Harro shares his vision for the next five years at Kepler Vision and where to find them online.</li></ul><p><br></p><p><strong>Quotes:</strong></p><p>“Over time, we added more and more examples to our training sets, and we are now at a phase where our software pretty much works out of the box actually.” — Harro Stokman</p><p><br></p><p>“So in the field of elderly care and hospital care, our software can look after the wellbeing of elderly clients and that is all we do and we do nothing more. But what we do, we do incredibly [well].” — Harro Stokman</p><p><br></p><p>“We have stayed faithful to the healthcare vertical. So my advice would be to focus.” — Harro Stokman</p><p><br></p><p><strong>Links:</strong></p><p><a href="https://www.linkedin.com/in/harro?originalSubdomain=nl">Harro Stokman on LinkedIn</a></p><p><a href="https://keplervision.eu/en/">Kepler Vision</a></p><p><strong>Resources for Computer Vision Teams:</strong></p><p><a href="https://www.linkedin.com/in/hdcouture/">LinkedIn</a> – Connect with Heather.</p><p><a href="https://pixelscientia.com/newsletter/">Computer Vision Insights Newsletter</a> – A biweekly newsletter to help bring the latest machine learning and computer vision research to applications in people and planetary health.</p><p><a href="http://pixelscientia.com/services/strategy-session/">Computer Vision Strategy Session</a> – Not sure how to advance your computer vision project? Get unstuck with a clear set of next steps. Schedule a 1 hour strategy session now to advance your project.</p><p><a href="https://pixelscientia.com/services/advisory/">Computer Vision Advisory Services</a> – Monthly advisory services to help you strategically plan your CV/ML capabilities, reduce the trial-and-error of model development, and get to market faster.</p>]]>
      </content:encoded>
      <pubDate>Mon, 29 May 2023 06:00:00 -0400</pubDate>
      <author>Heather D. Couture</author>
      <enclosure url="https://2.gum.fm/op3.dev/e/pdcn.co/e/pdst.fm/e/dts.podtrac.com/redirect.mp3/media.transistor.fm/3593b327/94c5ae95.mp3" length="26318168" type="audio/mpeg"/>
      <itunes:author>Heather D. Couture</itunes:author>
      <itunes:image href="https://img.transistor.fm/S-ZejFpjcaD45E2q988YyZMIyvGTzGEqEShep1TmPwQ/rs:fill:0:0:1/w:1400/h:1400/q:60/mb:500000/aHR0cHM6Ly9pbWct/dXBsb2FkLXByb2R1/Y3Rpb24udHJhbnNp/c3Rvci5mbS9lcGlz/b2RlLzEzMzM2MDMv/MTY4MzkxODI2Ni1h/cnR3b3JrLmpwZw.jpg"/>
      <itunes:duration>1092</itunes:duration>
      <itunes:summary>
        <![CDATA[<p>Today on Impact AI we welcome the CEO and Founder of AI healthcare company Kepler Vision, Harro Stokman. Kepler Vision is using computer vision to aid the healthcare world in recognizing falls in elderly patients, and Harro explains why the specificity of this focus is such a strength for the company.</p><p>Using computer vision and an ever-growing dataset to perfectly detect situations where personnel is needed is no small feat, and answers the staffing issues often associated with care facilities during the night. In our chat, Harro explains some of the technical aspects of the software and the major improvements he has overseen recently before going into some connected topics such as privacy concerns, hiring practices, and validating the accuracy of the models. Harro is also kind enough to offer some general comments and advice regarding AI startups, and the areas he believes are most vital for founders to attend to.</p><p>So if you would like to hear about a great practical application of AI in the healthcare space, and some thoughts from a leader making waves in some uncharted waters, be sure to listen in with us!</p><p><br></p><p><strong>Key Points:</strong></p><ul><li>Harro talks about his academic and professional background and his companies before Kepler Vision. </li><li>The specific problems that Kepler Vision is solving.  </li><li>Understanding the role of machine learning in Kepler Vision's service. </li><li>Harro shares the biggest challenges that he and his company have faced.  </li><li>The task of building trust and the hiring practices that contribute to this.</li><li>Validating the accuracy of models; Harro unpacks the labor-intensive process. </li><li>The improvements that have been made to the software through iterative updates.</li><li>Measuring the impact of the software; Harro talks about customer satisfaction. </li><li>Advice from Harro to AI startups about hiring and focus. 
</li><li>Harro shares his vision for the next five years at Kepler Vision and where to find them online.</li></ul><p><br></p><p><strong>Quotes:</strong></p><p>“Over time, we added more and more examples to our training sets, and we are now at a phase where our software pretty much works out of the box actually.” — Harro Stokman</p><p><br></p><p>“So in the field of elderly care and hospital care, our software can look after the wellbeing of elderly clients and that is all we do and we do nothing more. But what we do, we do incredibly [well].” — Harro Stokman</p><p><br></p><p>“We have stayed faithful to the healthcare vertical. So my advice would be to focus.” — Harro Stokman</p><p><br></p><p><strong>Links:</strong></p><p><a href="https://www.linkedin.com/in/harro?originalSubdomain=nl">Harro Stokman on LinkedIn</a></p><p><a href="https://keplervision.eu/en/">Kepler Vision</a></p><p><strong>Resources for Computer Vision Teams:</strong></p><p><a href="https://www.linkedin.com/in/hdcouture/">LinkedIn</a> – Connect with Heather.</p><p><a href="https://pixelscientia.com/newsletter/">Computer Vision Insights Newsletter</a> – A biweekly newsletter to help bring the latest machine learning and computer vision research to applications in people and planetary health.</p><p><a href="http://pixelscientia.com/services/strategy-session/">Computer Vision Strategy Session</a> – Not sure how to advance your computer vision project? Get unstuck with a clear set of next steps. Schedule a 1 hour strategy session now to advance your project.</p><p><a href="https://pixelscientia.com/services/advisory/">Computer Vision Advisory Services</a> – Monthly advisory services to help you strategically plan your CV/ML capabilities, reduce the trial-and-error of model development, and get to market faster.</p>]]>
      </itunes:summary>
      <itunes:keywords>AI, startups, business, machine learning, computer vision, deep learning, impact, sustainability, healthcare, medical</itunes:keywords>
      <itunes:explicit>No</itunes:explicit>
      <podcast:transcript url="https://share.transistor.fm/s/3593b327/transcript.txt" type="text/plain"/>
    </item>
    <item>
      <title>Leveraging Geospatial Data with Daniel Bailey from Astraea</title>
      <itunes:episode>33</itunes:episode>
      <podcast:episode>33</podcast:episode>
      <itunes:title>Leveraging Geospatial Data with Daniel Bailey from Astraea</itunes:title>
      <itunes:episodeType>full</itunes:episodeType>
      <guid isPermaLink="false">21634f7a-ffd7-4871-becd-6a0b68d2517d</guid>
      <link>https://pixelscientia.com/podcast/leveraging-geospatial-data-with-daniel-bailey-from-astraea/</link>
      <description>
        <![CDATA[<p>Can AI be used to solve major planetary problems? Today I'm joined by the CEO and Co-Founder of Astraea, Daniel Bailey, to talk about leveraging geospatial data for sustainability pursuits. Astraea's platform uses satellite imagery and AI to enable customers to access and operationalize spatiotemporal insights across multiple industries including clean energy, agriculture, conservation, carbon finance, and real estate.</p><p>Daniel fills us in on the issues Astraea aims to solve and the role of machine learning in its mission. We find out what makes satellite imagery unique (and uniquely challenging to work with) and how Astraea ensures that its models continue to meet customers’ needs over time. Daniel shares insight into the ML development process and advice for other leaders of AI-powered startups. Tune in to discover the balance between model accuracy and explainability, the importance of transparency when it comes to voluntary carbon markets, and more! </p><p><br></p><p><strong>Key Points:</strong></p><ul><li>Daniel Bailey’s background and how it led him to create Astraea.</li><li>What Astraea does; the planetary problems it aims to solve.</li><li>The role of machine learning in Astraea’s technology.</li><li>The insights Astraea extracts from satellite data and the models they use to do so.</li><li>What makes satellite imagery unique (and uniquely challenging to work with).</li><li>How Astraea ensures their models continue to meet customers’ needs over time.</li><li>The balance between model accuracy and explainability.</li><li>Astraea’s ML development process.</li><li>The first steps to solving the business case with ML.</li><li>The importance of involving stakeholders in the development process.</li><li>Daniel’s advice for other leaders of AI-powered startups.</li><li>Why it’s critical to stay focused on the business needs.</li><li>The training data required to meet global needs.</li><li>Daniel’s vision for the future impact of 
Astraea.</li></ul><p><br></p><p><strong>Quotes:</strong></p><p>“We're in this golden age of measurement. There's more data than you can look at individually. You really have to have something like AI/ML to recognize those patterns and extract those valuable insights from the data.” — Daniel Bailey</p><p><br></p><p>“Satellite imagery is a unique beast, for sure … The dimensionality of the data is completely unique.” — Daniel Bailey</p><p><br></p><p>“We do champion challenger techniques so that when we have a model in production, we're constantly looking for a better model and innovating on that capability.” — Daniel Bailey</p><p><br></p><p>“Without transparency, the voluntary market will collapse. We will never reach our goal of a two-degree Celsius without the voluntary carbon markets that are by nature a deregulated marketplace.” — Daniel Bailey</p><p><br></p><p>“When we think about creating ML products and features within the product, we think about using the most simplistic approach first.” — Daniel Bailey</p><p><br></p><p>“In the geospatial AI space, it is going to take a community to provide the capabilities we need to resolve some of these intractable problems we're facing as a planet.” — Daniel Bailey</p><p><br></p><p>“We need more training data to build better models to meet the needs that we're seeing globally.” — Daniel Bailey</p><p><br></p><p><strong>Links:</strong></p><p><a href="https://www.linkedin.com/in/daniel-bailey-05841349/">Daniel Bailey on LinkedIn</a></p><p><a href="https://twitter.com/daniel_bailey3">Daniel Bailey on Twitter</a></p><p><a href="https://www.astraea.earth/">Astraea</a></p><p><a href="https://www.linkedin.com/company/astraea/">Astraea on LinkedIn</a></p><p><a href="https://twitter.com/AstraeaInc">Astraea on Twitter</a></p><p><strong>Resources for Computer Vision Teams:</strong></p><p><a href="https://www.linkedin.com/in/hdcouture/">LinkedIn</a> – Connect with Heather.</p><p><a href="https://pixelscientia.com/newsletter/">Computer 
Vision Insights Newsletter</a> – A biweekly newsletter to help bring the latest machine learning and computer vision research to applications in people and planetary health.</p><p><a href="http://pixelscientia.com/services/strategy-session/">Computer Vision Strategy Session</a> – Not sure how to advance your computer vision project? Get unstuck with a clear set of next steps. Schedule a 1 hour strategy session now to advance your project.</p><p><a href="https://pixelscientia.com/services/advisory/">Computer Vision Advisory Services</a> – Monthly advisory services to help you strategically plan your CV/ML capabilities, reduce the trial-and-error of model development, and get to market faster.</p>]]>
      </description>
      <content:encoded>
        <![CDATA[<p>Can AI be used to solve major planetary problems? Today I'm joined by the CEO and Co-Founder of Astraea, Daniel Bailey, to talk about leveraging geospatial data for sustainability pursuits. Astraea's platform uses satellite imagery and AI to enable customers to access and operationalize spatiotemporal insights across multiple industries including clean energy, agriculture, conservation, carbon finance, and real estate.</p><p>Daniel fills us in on the issues Astraea aims to solve and the role of machine learning in its mission. We find out what makes satellite imagery unique (and uniquely challenging to work with) and how Astraea ensures that its models continue to meet customers’ needs over time. Daniel shares insight into the ML development process and advice for other leaders of AI-powered startups. Tune in to discover the balance between model accuracy and explainability, the importance of transparency when it comes to voluntary carbon markets, and more! </p><p><br></p><p><strong>Key Points:</strong></p><ul><li>Daniel Bailey’s background and how it led him to create Astraea.</li><li>What Astraea does; the planetary problems it aims to solve.</li><li>The role of machine learning in Astraea’s technology.</li><li>The insights Astraea extracts from satellite data and the models they use to do so.</li><li>What makes satellite imagery unique (and uniquely challenging to work with).</li><li>How Astraea ensures their models continue to meet customers’ needs over time.</li><li>The balance between model accuracy and explainability.</li><li>Astraea’s ML development process.</li><li>The first steps to solving the business case with ML.</li><li>The importance of involving stakeholders in the development process.</li><li>Daniel’s advice for other leaders of AI-powered startups.</li><li>Why it’s critical to stay focused on the business needs.</li><li>The training data required to meet global needs.</li><li>Daniel’s vision for the future impact of 
Astraea.</li></ul><p><br></p><p><strong>Quotes:</strong></p><p>“We're in this golden age of measurement. There's more data than you can look at individually. You really have to have something like AI/ML to recognize those patterns and extract those valuable insights from the data.” — Daniel Bailey</p><p><br></p><p>“Satellite imagery is a unique beast, for sure … The dimensionality of the data is completely unique.” — Daniel Bailey</p><p><br></p><p>“We do champion challenger techniques so that when we have a model in production, we're constantly looking for a better model and innovating on that capability.” — Daniel Bailey</p><p><br></p><p>“Without transparency, the voluntary market will collapse. We will never reach our goal of a two-degree Celsius without the voluntary carbon markets that are by nature a deregulated marketplace.” — Daniel Bailey</p><p><br></p><p>“When we think about creating ML products and features within the product, we think about using the most simplistic approach first.” — Daniel Bailey</p><p><br></p><p>“In the geospatial AI space, it is going to take a community to provide the capabilities we need to resolve some of these intractable problems we're facing as a planet.” — Daniel Bailey</p><p><br></p><p>“We need more training data to build better models to meet the needs that we're seeing globally.” — Daniel Bailey</p><p><br></p><p><strong>Links:</strong></p><p><a href="https://www.linkedin.com/in/daniel-bailey-05841349/">Daniel Bailey on LinkedIn</a></p><p><a href="https://twitter.com/daniel_bailey3">Daniel Bailey on Twitter</a></p><p><a href="https://www.astraea.earth/">Astraea</a></p><p><a href="https://www.linkedin.com/company/astraea/">Astraea on LinkedIn</a></p><p><a href="https://twitter.com/AstraeaInc">Astraea on Twitter</a></p><p><strong>Resources for Computer Vision Teams:</strong></p><p><a href="https://www.linkedin.com/in/hdcouture/">LinkedIn</a> – Connect with Heather.</p><p><a href="https://pixelscientia.com/newsletter/">Computer 
Vision Insights Newsletter</a> – A biweekly newsletter to help bring the latest machine learning and computer vision research to applications in people and planetary health.</p><p><a href="http://pixelscientia.com/services/strategy-session/">Computer Vision Strategy Session</a> – Not sure how to advance your computer vision project? Get unstuck with a clear set of next steps. Schedule a 1 hour strategy session now to advance your project.</p><p><a href="https://pixelscientia.com/services/advisory/">Computer Vision Advisory Services</a> – Monthly advisory services to help you strategically plan your CV/ML capabilities, reduce the trial-and-error of model development, and get to market faster.</p>]]>
      </content:encoded>
      <pubDate>Mon, 22 May 2023 06:00:00 -0400</pubDate>
      <author>Heather D. Couture</author>
      <enclosure url="https://2.gum.fm/op3.dev/e/pdcn.co/e/pdst.fm/e/dts.podtrac.com/redirect.mp3/media.transistor.fm/81a23a10/5bcb48e9.mp3" length="32643670" type="audio/mpeg"/>
      <itunes:author>Heather D. Couture</itunes:author>
      <itunes:image href="https://img.transistor.fm/PXPeDCM7XcNxDiPbdI35g9TYgSIwqYjg1KelbCgGSQA/rs:fill:0:0:1/w:1400/h:1400/q:60/mb:500000/aHR0cHM6Ly9pbWct/dXBsb2FkLXByb2R1/Y3Rpb24udHJhbnNp/c3Rvci5mbS9lcGlz/b2RlLzEyODYzMTMv/MTY4MTMxNzQxMS1h/cnR3b3JrLmpwZw.jpg"/>
      <itunes:duration>1352</itunes:duration>
      <itunes:summary>
        <![CDATA[<p>Can AI be used to solve major planetary problems? Today I'm joined by the CEO and Co-Founder of Astraea, Daniel Bailey, to talk about leveraging geospatial data for sustainability pursuits. Astraea's platform uses satellite imagery and AI to enable customers to access and operationalize spatiotemporal insights across multiple industries including clean energy, agriculture, conservation, carbon finance, and real estate.</p><p>Daniel fills us in on the issues Astraea aims to solve and the role of machine learning in its mission. We find out what makes satellite imagery unique (and uniquely challenging to work with) and how Astraea ensures that its models continue to meet customers’ needs over time. Daniel shares insight into the ML development process and advice for other leaders of AI-powered startups. Tune in to discover the balance between model accuracy and explainability, the importance of transparency when it comes to voluntary carbon markets, and more! </p><p><br></p><p><strong>Key Points:</strong></p><ul><li>Daniel Bailey’s background and how it led him to create Astraea.</li><li>What Astraea does; the planetary problems it aims to solve.</li><li>The role of machine learning in Astraea’s technology.</li><li>The insights Astraea extracts from satellite data and the models they use to do so.</li><li>What makes satellite imagery unique (and uniquely challenging to work with).</li><li>How Astraea ensures their models continue to meet customers’ needs over time.</li><li>The balance between model accuracy and explainability.</li><li>Astraea’s ML development process.</li><li>The first steps to solving the business case with ML.</li><li>The importance of involving stakeholders in the development process.</li><li>Daniel’s advice for other leaders of AI-powered startups.</li><li>Why it’s critical to stay focused on the business needs.</li><li>The training data required to meet global needs.</li><li>Daniel’s vision for the future impact of 
Astraea.</li></ul><p><br></p><p><strong>Quotes:</strong></p><p>“We're in this golden age of measurement. There's more data than you can look at individually. You really have to have something like AI/ML to recognize those patterns and extract those valuable insights from the data.” — Daniel Bailey</p><p><br></p><p>“Satellite imagery is a unique beast, for sure … The dimensionality of the data is completely unique.” — Daniel Bailey</p><p><br></p><p>“We do champion challenger techniques so that when we have a model in production, we're constantly looking for a better model and innovating on that capability.” — Daniel Bailey</p><p><br></p><p>“Without transparency, the voluntary market will collapse. We will never reach our goal of a two-degree Celsius without the voluntary carbon markets that are by nature a deregulated marketplace.” — Daniel Bailey</p><p><br></p><p>“When we think about creating ML products and features within the product, we think about using the most simplistic approach first.” — Daniel Bailey</p><p><br></p><p>“In the geospatial AI space, it is going to take a community to provide the capabilities we need to resolve some of these intractable problems we're facing as a planet.” — Daniel Bailey</p><p><br></p><p>“We need more training data to build better models to meet the needs that we're seeing globally.” — Daniel Bailey</p><p><br></p><p><strong>Links:</strong></p><p><a href="https://www.linkedin.com/in/daniel-bailey-05841349/">Daniel Bailey on LinkedIn</a></p><p><a href="https://twitter.com/daniel_bailey3">Daniel Bailey on Twitter</a></p><p><a href="https://www.astraea.earth/">Astraea</a></p><p><a href="https://www.linkedin.com/company/astraea/">Astraea on LinkedIn</a></p><p><a href="https://twitter.com/AstraeaInc">Astraea on Twitter</a></p><p><strong>Resources for Computer Vision Teams:</strong></p><p><a href="https://www.linkedin.com/in/hdcouture/">LinkedIn</a> – Connect with Heather.</p><p><a href="https://pixelscientia.com/newsletter/">Computer 
Vision Insights Newsletter</a> – A biweekly newsletter to help bring the latest machine learning and computer vision research to applications in people and planetary health.</p><p><a href="http://pixelscientia.com/services/strategy-session/">Computer Vision Strategy Session</a> – Not sure how to advance your computer vision project? Get unstuck with a clear set of next steps. Schedule a 1 hour strategy session now to advance your project.</p><p><a href="https://pixelscientia.com/services/advisory/">Computer Vision Advisory Services</a> – Monthly advisory services to help you strategically plan your CV/ML capabilities, reduce the trial-and-error of model development, and get to market faster.</p>]]>
      </itunes:summary>
      <itunes:keywords>AI, startups, business, machine learning, computer vision, deep learning, impact, sustainability, healthcare, medical</itunes:keywords>
      <itunes:explicit>No</itunes:explicit>
      <podcast:transcript url="https://share.transistor.fm/s/81a23a10/transcript.txt" type="text/plain"/>
    </item>
    <item>
      <title>Predictive Modeling for Healthcare with Dave DeCaprio from ClosedLoop</title>
      <itunes:episode>32</itunes:episode>
      <podcast:episode>32</podcast:episode>
      <itunes:title>Predictive Modeling for Healthcare with Dave DeCaprio from ClosedLoop</itunes:title>
      <itunes:episodeType>full</itunes:episodeType>
      <guid isPermaLink="false">11439f72-f2f2-48f8-849f-23635987162b</guid>
      <link>https://pixelscientia.com/podcast/predictive-modeling-for-healthcare-with-dave-decaprio-from-closedloop/</link>
      <description>
        <![CDATA[<p>In the words of Dave DeCaprio, “We need to move from a reactive healthcare system to a proactive one.” Dave is the CTO and Co-founder of ClosedLoop, a data science platform for healthcare that is using predictive AI to make this crucial shift.</p><p>In this episode, we learn about the problem he identified in the healthcare system that he felt he was uniquely set up to solve given his background, and how ClosedLoop is working to solve it. Dave shares use cases for ClosedLoop’s predictive models and the challenges he’s encountered in applying predictive modeling to health data. We find out why model interpretability is so important and learn about the role of human mediation in ClosedLoop’s applications. Dave explains the ways in which biases manifest in the world of health data and how ClosedLoop measures and mitigates bias. To find out how ClosedLoop measures its models over time, as well as the impact of its technology, tune in! Dave closes with some astute advice for other leaders of AI-powered startups and his vision for the near-future impact of ClosedLoop.</p><p><br></p><p><strong>Key Points:</strong></p><ul><li>Dave DeCaprio's background and how it led to the creation of ClosedLoop.</li><li>The healthcare problem he felt he was uniquely set up to solve.</li><li>What ClosedLoop does and why it’s important for healthcare.</li><li>Use cases for ClosedLoop’s predictive models.</li><li>The challenges of working with health data and applying predictive modeling to it.</li><li>Why the model interpretability in ClosedLoop’s applications matters.</li><li>Human intelligence mediation in the interpretation process.</li><li>How ClosedLoop won the CMS AI health outcomes challenge.</li><li>Examples of how bias manifests in models trained with health data; how to measure and mitigate bias.</li><li>How ClosedLoop monitors its models over time and how COVID affected its accuracy.</li><li>The way ClosedLoop measures the impact of its 
technology.</li><li>Dave’s advice for other leaders of AI-powered startups.</li><li>His vision for the near-future impact of ClosedLoop.</li></ul><p><br></p><p><strong>Quotes:</strong></p><p>“There were a lot of things I felt were broken about healthcare that I couldn’t do anything about, but I kept coming back to this idea of using all the right data to make the right decisions and getting the right treatment to the right patient at the right time.” — Dave DeCaprio</p><p><br></p><p>“[We] put ClosedLoop together to basically tackle this data science and AI in healthcare challenge.” — Dave DeCaprio</p><p><br></p><p>“Where prediction in AI plays a huge role in healthcare is moving from a reactive to a proactive system.” — Dave DeCaprio</p><p><br></p><p>“Healthcare data is very complex. There are tens of thousands of diagnosis codes, there are hundreds of thousands of drug codes, and they’re constantly changing.” — Dave DeCaprio</p><p><br></p><p>“In almost all cases, the output of our model is mediated with human intelligence in order to actually make a decision about a patient’s care.” — Dave DeCaprio</p><p><br></p><p>“The most powerful measures of the impact are the stories we get from our customers.” — Dave DeCaprio</p><p><br></p><p>“If you want to build a robust company that’s going to be successful year after year and be able to grow and really tackle these problems, you eventually have to show tangible demonstrable benefits.” — Dave DeCaprio</p><p><br></p><p><strong>Links:</strong></p><p><a href="https://www.linkedin.com/in/davedecaprio/">Dave DeCaprio on LinkedIn</a></p><p><a href="https://www.closedloop.ai/">ClosedLoop</a></p><p><a href="https://twitter.com/closedloopai">ClosedLoop on Twitter</a></p><p><strong>Resources for Computer Vision Teams:</strong></p><p><a href="https://www.linkedin.com/in/hdcouture/">LinkedIn</a> – Connect with Heather.</p><p><a href="https://pixelscientia.com/newsletter/">Computer Vision Insights Newsletter</a> – A biweekly 
newsletter to help bring the latest machine learning and computer vision research to applications in people and planetary health.</p><p><a href="http://pixelscientia.com/services/strategy-session/">Computer Vision Strategy Session</a> – Not sure how to advance your computer vision project? Get unstuck with a clear set of next steps. Schedule a 1 hour strategy session now to advance your project.</p><p><a href="https://pixelscientia.com/services/advisory/">Computer Vision Advisory Services</a> – Monthly advisory services to help you strategically plan your CV/ML capabilities, reduce the trial-and-error of model development, and get to market faster.</p>]]>
      </description>
      <content:encoded>
        <![CDATA[<p>In the words of Dave DeCaprio, “We need to move from a reactive healthcare system to a proactive one.” Dave is the CTO and Co-founder of ClosedLoop, a data science platform for healthcare that is using predictive AI to make this crucial shift.</p><p>In this episode, we learn about the problem he identified in the healthcare system that he felt he was uniquely set up to solve given his background, and how ClosedLoop is working to solve it. Dave shares use cases for ClosedLoop’s predictive models and the challenges he’s encountered in applying predictive modeling to health data. We find out why model interpretability is so important and learn about the role of human mediation in ClosedLoop’s applications. Dave explains the ways in which biases manifest in the world of health data and how ClosedLoop measures and mitigates bias. To find out how ClosedLoop measures its models over time, as well as the impact of its technology, tune in! Dave closes with some astute advice for other leaders of AI-powered startups and his vision for the near-future impact of ClosedLoop.</p><p><br></p><p><strong>Key Points:</strong></p><ul><li>Dave DeCaprio's background and how it led to the creation of ClosedLoop.</li><li>The healthcare problem he felt he was uniquely set up to solve.</li><li>What ClosedLoop does and why it’s important for healthcare.</li><li>Use cases for ClosedLoop’s predictive models.</li><li>The challenges of working with health data and applying predictive modeling to it.</li><li>Why the model interpretability in ClosedLoop’s applications matters.</li><li>Human intelligence mediation in the interpretation process.</li><li>How ClosedLoop won the CMS AI health outcomes challenge.</li><li>Examples of how bias manifests in models trained with health data; how to measure and mitigate bias.</li><li>How ClosedLoop monitors its models over time and how COVID affected its accuracy.</li><li>The way ClosedLoop measures the impact of its 
technology.</li><li>Dave’s advice for other leaders of AI-powered startups.</li><li>His vision for the near-future impact of ClosedLoop.</li></ul><p><br></p><p><strong>Quotes:</strong></p><p>“There were a lot of things I felt were broken about healthcare that I couldn’t do anything about, but I kept coming back to this idea of using all the right data to make the right decisions and getting the right treatment to the right patient at the right time.” — Dave DeCaprio</p><p><br></p><p>“[We] put ClosedLoop together to basically tackle this data science and AI in healthcare challenge.” — Dave DeCaprio</p><p><br></p><p>“Where prediction in AI plays a huge role in healthcare is moving from a reactive to a proactive system.” — Dave DeCaprio</p><p><br></p><p>“Healthcare data is very complex. There are tens of thousands of diagnosis codes, there are hundreds of thousands of drug codes, and they’re constantly changing.” — Dave DeCaprio</p><p><br></p><p>“In almost all cases, the output of our model is mediated with human intelligence in order to actually make a decision about a patient’s care.” — Dave DeCaprio</p><p><br></p><p>“The most powerful measures of the impact are the stories we get from our customers.” — Dave DeCaprio</p><p><br></p><p>“If you want to build a robust company that’s going to be successful year after year and be able to grow and really tackle these problems, you eventually have to show tangible demonstrable benefits.” — Dave DeCaprio</p><p><br></p><p><strong>Links:</strong></p><p><a href="https://www.linkedin.com/in/davedecaprio/">Dave DeCaprio on LinkedIn</a></p><p><a href="https://www.closedloop.ai/">ClosedLoop</a></p><p><a href="https://twitter.com/closedloopai">ClosedLoop on Twitter</a></p><p><strong>Resources for Computer Vision Teams:</strong></p><p><a href="https://www.linkedin.com/in/hdcouture/">LinkedIn</a> – Connect with Heather.</p><p><a href="https://pixelscientia.com/newsletter/">Computer Vision Insights Newsletter</a> – A biweekly 
newsletter to help bring the latest machine learning and computer vision research to applications in people and planetary health.</p><p><a href="http://pixelscientia.com/services/strategy-session/">Computer Vision Strategy Session</a> – Not sure how to advance your computer vision project? Get unstuck with a clear set of next steps. Schedule a 1 hour strategy session now to advance your project.</p><p><a href="https://pixelscientia.com/services/advisory/">Computer Vision Advisory Services</a> – Monthly advisory services to help you strategically plan your CV/ML capabilities, reduce the trial-and-error of model development, and get to market faster.</p>]]>
      </content:encoded>
      <pubDate>Mon, 15 May 2023 06:00:00 -0400</pubDate>
      <author>Heather D. Couture</author>
      <enclosure url="https://2.gum.fm/op3.dev/e/pdcn.co/e/pdst.fm/e/dts.podtrac.com/redirect.mp3/media.transistor.fm/8a44ee0a/47a27fa1.mp3" length="25852756" type="audio/mpeg"/>
      <itunes:author>Heather D. Couture</itunes:author>
      <itunes:image href="https://img.transistor.fm/oJb6s-drObs9cvPJtPJyflyNq0iITxhSnN6TQB_sNjY/rs:fill:0:0:1/w:1400/h:1400/q:60/mb:500000/aHR0cHM6Ly9pbWct/dXBsb2FkLXByb2R1/Y3Rpb24udHJhbnNp/c3Rvci5mbS9lcGlz/b2RlLzEzMzM2MDEv/MTY4MzkyMDM0MS1h/cnR3b3JrLmpwZw.jpg"/>
      <itunes:duration>1612</itunes:duration>
      <itunes:summary>
        <![CDATA[<p>In the words of Dave DeCaprio, “We need to move from a reactive healthcare system to a proactive one.” Dave is the CTO and Co-founder of ClosedLoop, a data science platform for healthcare that is using predictive AI to make this crucial shift.</p><p>In this episode, we learn about the problem he identified in the healthcare system that he felt he was uniquely set up to solve given his background, and how ClosedLoop is working to solve it. Dave shares use cases for ClosedLoop’s predictive models and the challenges he’s encountered in applying predictive modeling to health data. We find out why model interpretability is so important and learn about the role of human mediation in ClosedLoop’s applications. Dave explains the ways in which biases manifest in the world of health data and how ClosedLoop measures and mitigates bias. To find out how ClosedLoop measures its models over time, as well as the impact of its technology, tune in! Dave closes with some astute advice for other leaders of AI-powered startups and his vision for the near-future impact of ClosedLoop.</p><p><br></p><p><strong>Key Points:</strong></p><ul><li>Dave DeCaprio's background and how it led to the creation of ClosedLoop.</li><li>The healthcare problem he felt he was uniquely set up to solve.</li><li>What ClosedLoop does and why it’s important for healthcare.</li><li>Use cases for ClosedLoop’s predictive models.</li><li>The challenges of working with health data and applying predictive modeling to it.</li><li>Why the model interpretability in ClosedLoop’s applications matters.</li><li>Human intelligence mediation in the interpretation process.</li><li>How ClosedLoop won the CMS AI health outcomes challenge.</li><li>Examples of how bias manifests in models trained with health data; how to measure and mitigate bias.</li><li>How ClosedLoop monitors its models over time and how COVID affected its accuracy.</li><li>The way ClosedLoop measures the impact of its 
technology.</li><li>Dave’s advice for other leaders of AI-powered startups.</li><li>His vision for the near-future impact of ClosedLoop.</li></ul><p><br></p><p><strong>Quotes:</strong></p><p>“There were a lot of things I felt were broken about healthcare that I couldn’t do anything about, but I kept coming back to this idea of using all the right data to make the right decisions and getting the right treatment to the right patient at the right time.” — Dave DeCaprio</p><p><br></p><p>“[We] put ClosedLoop together to basically tackle this data science and AI in healthcare challenge.” — Dave DeCaprio</p><p><br></p><p>“Where prediction in AI plays a huge role in healthcare is moving from a reactive to a proactive system.” — Dave DeCaprio</p><p><br></p><p>“Healthcare data is very complex. There are tens of thousands of diagnosis codes, there are hundreds of thousands of drug codes, and they’re constantly changing.” — Dave DeCaprio</p><p><br></p><p>“In almost all cases, the output of our model is mediated with human intelligence in order to actually make a decision about a patient’s care.” — Dave DeCaprio</p><p><br></p><p>“The most powerful measures of the impact are the stories we get from our customers.” — Dave DeCaprio</p><p><br></p><p>“If you want to build a robust company that’s going to be successful year after year and be able to grow and really tackle these problems, you eventually have to show tangible demonstrable benefits.” — Dave DeCaprio</p><p><br></p><p><strong>Links:</strong></p><p><a href="https://www.linkedin.com/in/davedecaprio/">Dave DeCaprio on LinkedIn</a></p><p><a href="https://www.closedloop.ai/">ClosedLoop</a></p><p><a href="https://twitter.com/closedloopai">ClosedLoop on Twitter</a></p><p><strong>Resources for Computer Vision Teams:</strong></p><p><a href="https://www.linkedin.com/in/hdcouture/">LinkedIn</a> – Connect with Heather.</p><p><a href="https://pixelscientia.com/newsletter/">Computer Vision Insights Newsletter</a> – A biweekly 
newsletter to help bring the latest machine learning and computer vision research to applications in people and planetary health.</p><p><a href="http://pixelscientia.com/services/strategy-session/">Computer Vision Strategy Session</a> – Not sure how to advance your computer vision project? Get unstuck with a clear set of next steps. Schedule a 1 hour strategy session now to advance your project.</p><p><a href="https://pixelscientia.com/services/advisory/">Computer Vision Advisory Services</a> – Monthly advisory services to help you strategically plan your CV/ML capabilities, reduce the trial-and-error of model development, and get to market faster.</p>]]>
      </itunes:summary>
      <itunes:keywords>AI, startups, business, machine learning, computer vision, deep learning, impact, sustainability, healthcare, medical</itunes:keywords>
      <itunes:explicit>No</itunes:explicit>
      <podcast:transcript url="https://share.transistor.fm/s/8a44ee0a/transcript.txt" type="text/plain"/>
    </item>
    <item>
      <title>Climate Resilience with Max Evans from ClimateAi</title>
      <itunes:episode>31</itunes:episode>
      <podcast:episode>31</podcast:episode>
      <itunes:title>Climate Resilience with Max Evans from ClimateAi</itunes:title>
      <itunes:episodeType>full</itunes:episodeType>
      <guid isPermaLink="false">66a71a4f-c2e7-4a3b-8c40-4285d5a8eec5</guid>
      <link>https://pixelscientia.com/podcast/climate-resilience-with-max-evans-from-climateai/</link>
      <description>
        <![CDATA[<p>AI and machine learning are growing in significance as far as adapting to climate change is concerned. During today’s episode, I welcome Max Evans, founder and CTO at ClimateAi, to discuss the topic of climate resilience and how technology is driving progress in this arena.</p><p>Max begins our conversation with an overview of the important work he is doing at ClimateAi, before weighing in on the role of machine learning in the AI startup space. He describes in detail the chain of different machine learning models and the challenges associated with high dimensionality and quality in data of this nature. We touch on Max’s preferred methodologies, and he unpacks the role of literature searches, a lack of historical data, and the technological advancements he is able to leverage in his work at ClimateAi today.</p><p><br></p><p><strong>Key Points:</strong></p><ul><li>An introduction to Max Evans, Founder and CTO at ClimateAi.</li><li>What ClimateAi does and why it is important for adapting to climate change.</li><li>The role of machine learning in AI startups.</li><li>The chain of different ML models involved in making weather and climate data usable.</li><li>Tackling the challenge of high dimensionality and quality in machine learning data.</li><li>Projecting and adding broader impact functions to produce more meaningful data.</li><li>The hybrid between Stanford design thinking and the lean methodologies Max prefers.</li><li>What it means to include a literature search in the process.</li><li>Technological advancements leveraged by ClimateAi today. </li><li>Navigating a lack of historical data with synthetic data. </li><li>Micro and macro perspectives on climate decisions.</li><li>What the AI process is really about.</li><li>Max’s goal for ClimateAi in three to five years.</li></ul><p><br></p><p><strong>Quotes:</strong></p><p>“ClimateAi’s mission is to climate-proof our economic system. 
We want all businesses to make climate-informed decisions.” — Max Evans</p><p><br></p><p>“ML is a core mindset of AI startups in terms of how you solve problems, how you start with the exploratory data analysis, the hypothesis building, the baseline and investigation, the modeling, and the many loops.” — Max Evans</p><p><br></p><p>“Don't start with the technology or the product, but start with a customer need.” — Max Evans</p><p><br></p><p>“It wouldn't have been possible before, and it is definitely possible now to start building forecasts of the climate.” — Max Evans</p><p><br></p><p>“Newer methods are developing so rapidly that the edge of what's possible continuously shifts outward.” — Max Evans</p><p><br></p><p>“[The AI process] is really about building a data-driven, hypothesis-driven, need-solving culture in both your technological team and in your broader team at large.” — Max Evans</p><p><br></p><p><strong>Links:</strong></p><p><a href="https://www.linkedin.com/in/maximilian-evans-1b653770/">Max Evans on LinkedIn</a></p><p><a href="https://climate.ai/">ClimateAi</a></p><p><strong>Resources for Computer Vision Teams:</strong></p><p><a href="https://www.linkedin.com/in/hdcouture/">LinkedIn</a> – Connect with Heather.</p><p><a href="https://pixelscientia.com/newsletter/">Computer Vision Insights Newsletter</a> – A biweekly newsletter to help bring the latest machine learning and computer vision research to applications in people and planetary health.</p><p><a href="http://pixelscientia.com/services/strategy-session/">Computer Vision Strategy Session</a> – Not sure how to advance your computer vision project? Get unstuck with a clear set of next steps. 
Schedule a 1 hour strategy session now to advance your project.</p><p><a href="https://pixelscientia.com/services/advisory/">Computer Vision Advisory Services</a> – Monthly advisory services to help you strategically plan your CV/ML capabilities, reduce the trial-and-error of model development, and get to market faster.</p>]]>
      </description>
      <content:encoded>
        <![CDATA[<p>AI and machine learning are growing in significance as far as adapting to climate change is concerned. During today’s episode, I welcome Max Evans, founder and CTO at ClimateAi, to discuss the topic of climate resilience and how technology is driving progress in this arena.</p><p>Max begins our conversation with an overview of the important work he is doing at ClimateAi, before weighing in on the role of machine learning in the AI startup space. He describes in detail the chain of different machine learning models and the challenges associated with high dimensionality and quality in data of this nature. We touch on Max’s preferred methodologies, and he unpacks the role of literature searches, a lack of historical data, and the technological advancements he is able to leverage in his work at ClimateAi today.</p><p><br></p><p><strong>Key Points:</strong></p><ul><li>An introduction to Max Evans, Founder and CTO at ClimateAi.</li><li>What ClimateAi does and why it is important for adapting to climate change.</li><li>The role of machine learning in AI startups.</li><li>The chain of different ML models involved in making weather and climate data usable.</li><li>Tackling the challenge of high dimensionality and quality in machine learning data.</li><li>Projecting and adding broader impact functions to produce more meaningful data.</li><li>The hybrid between Stanford design thinking and the lean methodologies Max prefers.</li><li>What it means to include a literature search in the process.</li><li>Technological advancements leveraged by ClimateAi today. </li><li>Navigating a lack of historical data with synthetic data. </li><li>Micro and macro perspectives on climate decisions.</li><li>What the AI process is really about.</li><li>Max’s goal for ClimateAi in three to five years.</li></ul><p><br></p><p><strong>Quotes:</strong></p><p>“ClimateAi’s mission is to climate-proof our economic system. 
We want all businesses to make climate-informed decisions.” — Max Evans</p><p><br></p><p>“ML is a core mindset of AI startups in terms of how you solve problems, how you start with the exploratory data analysis, the hypothesis building, the baseline and investigation, the modeling, and the many loops.” — Max Evans</p><p><br></p><p>“Don't start with the technology or the product, but start with a customer need.” — Max Evans</p><p><br></p><p>“It wouldn't have been possible before, and it is definitely possible now to start building forecasts of the climate.” — Max Evans</p><p><br></p><p>“Newer methods are developing so rapidly that the edge of what's possible continuously shifts outward.” — Max Evans</p><p><br></p><p>“[The AI process] is really about building a data-driven, hypothesis-driven, need-solving culture in both your technological team and in your broader team at large.” — Max Evans</p><p><br></p><p><strong>Links:</strong></p><p><a href="https://www.linkedin.com/in/maximilian-evans-1b653770/">Max Evans on LinkedIn</a></p><p><a href="https://climate.ai/">ClimateAi</a></p><p><strong>Resources for Computer Vision Teams:</strong></p><p><a href="https://www.linkedin.com/in/hdcouture/">LinkedIn</a> – Connect with Heather.</p><p><a href="https://pixelscientia.com/newsletter/">Computer Vision Insights Newsletter</a> – A biweekly newsletter to help bring the latest machine learning and computer vision research to applications in people and planetary health.</p><p><a href="http://pixelscientia.com/services/strategy-session/">Computer Vision Strategy Session</a> – Not sure how to advance your computer vision project? Get unstuck with a clear set of next steps. 
Schedule a 1 hour strategy session now to advance your project.</p><p><a href="https://pixelscientia.com/services/advisory/">Computer Vision Advisory Services</a> – Monthly advisory services to help you strategically plan your CV/ML capabilities, reduce the trial-and-error of model development, and get to market faster.</p>]]>
      </content:encoded>
      <pubDate>Mon, 08 May 2023 06:00:00 -0400</pubDate>
      <author>Heather D. Couture</author>
      <enclosure url="https://2.gum.fm/op3.dev/e/pdcn.co/e/pdst.fm/e/dts.podtrac.com/redirect.mp3/media.transistor.fm/0124cdfe/29c4a966.mp3" length="37288788" type="audio/mpeg"/>
      <itunes:author>Heather D. Couture</itunes:author>
      <itunes:image href="https://img.transistor.fm/15-8sAyBeHsxYlQs-Cq3O-B3QSTPTeYnwOiC5sSosXU/rs:fill:0:0:1/w:1400/h:1400/q:60/mb:500000/aHR0cHM6Ly9pbWct/dXBsb2FkLXByb2R1/Y3Rpb24udHJhbnNp/c3Rvci5mbS9lcGlz/b2RlLzEyODYyOTUv/MTY4MTMxNzIyOS1h/cnR3b3JrLmpwZw.jpg"/>
      <itunes:duration>1549</itunes:duration>
      <itunes:summary>
        <![CDATA[<p>AI and machine learning are growing in significance as far as adapting to climate change is concerned. During today’s episode, I welcome Max Evans, founder and CTO at ClimateAi, to discuss the topic of climate resilience and how technology is driving progress in this arena.</p><p>Max begins our conversation with an overview of the important work he is doing at ClimateAi, before weighing in on the role of machine learning in the AI startup space. He describes in detail the chain of different machine learning models and the challenges associated with high dimensionality and quality in data of this nature. We touch on Max’s preferred methodologies, and he unpacks the role of literature searches, a lack of historical data, and the technological advancements he is able to leverage in his work at ClimateAi today.</p><p><br></p><p><strong>Key Points:</strong></p><ul><li>An introduction to Max Evans, Founder and CTO at ClimateAi.</li><li>What ClimateAi does and why it is important for adapting to climate change.</li><li>The role of machine learning in AI startups.</li><li>The chain of different ML models involved in making weather and climate data usable.</li><li>Tackling the challenge of high dimensionality and quality in machine learning data.</li><li>Projecting and adding broader impact functions to produce more meaningful data.</li><li>The hybrid between Stanford design thinking and the lean methodologies Max prefers.</li><li>What it means to include a literature search in the process.</li><li>Technological advancements leveraged by ClimateAi today. </li><li>Navigating a lack of historical data with synthetic data. </li><li>Micro and macro perspectives on climate decisions.</li><li>What the AI process is really about.</li><li>Max’s goal for ClimateAi in three to five years.</li></ul><p><br></p><p><strong>Quotes:</strong></p><p>“ClimateAi’s mission is to climate-proof our economic system. 
We want all businesses to make climate-informed decisions.” — Max Evans</p><p><br></p><p>“ML is a core mindset of AI startups in terms of how you solve problems, how you start with the exploratory data analysis, the hypothesis building, the baseline and investigation, the modeling, and the many loops.” — Max Evans</p><p><br></p><p>“Don't start with the technology or the product, but start with a customer need.” — Max Evans</p><p><br></p><p>“It wouldn't have been possible before, and it is definitely possible now to start building forecasts of the climate.” — Max Evans</p><p><br></p><p>“Newer methods are developing so rapidly that the edge of what's possible continuously shifts outward.” — Max Evans</p><p><br></p><p>“[The AI process] is really about building a data-driven, hypothesis-driven, need-solving culture in both your technological team and in your broader team at large.” — Max Evans</p><p><br></p><p><strong>Links:</strong></p><p><a href="https://www.linkedin.com/in/maximilian-evans-1b653770/">Max Evans on LinkedIn</a></p><p><a href="https://climate.ai/">ClimateAi</a></p><p><strong>Resources for Computer Vision Teams:</strong></p><p><a href="https://www.linkedin.com/in/hdcouture/">LinkedIn</a> – Connect with Heather.</p><p><a href="https://pixelscientia.com/newsletter/">Computer Vision Insights Newsletter</a> – A biweekly newsletter to help bring the latest machine learning and computer vision research to applications in people and planetary health.</p><p><a href="http://pixelscientia.com/services/strategy-session/">Computer Vision Strategy Session</a> – Not sure how to advance your computer vision project? Get unstuck with a clear set of next steps. 
Schedule a 1 hour strategy session now to advance your project.</p><p><a href="https://pixelscientia.com/services/advisory/">Computer Vision Advisory Services</a> – Monthly advisory services to help you strategically plan your CV/ML capabilities, reduce the trial-and-error of model development, and get to market faster.</p>]]>
      </itunes:summary>
      <itunes:keywords>AI, startups, business, machine learning, computer vision, deep learning, impact, sustainability, healthcare, medical</itunes:keywords>
      <itunes:explicit>No</itunes:explicit>
      <podcast:transcript url="https://share.transistor.fm/s/0124cdfe/transcript.txt" type="text/plain"/>
    </item>
    <item>
      <title>Searching for New Therapeutics in Nature with David Healey from Enveda Biosciences</title>
      <itunes:episode>30</itunes:episode>
      <podcast:episode>30</podcast:episode>
      <itunes:title>Searching for New Therapeutics in Nature with David Healey from Enveda Biosciences</itunes:title>
      <itunes:episodeType>full</itunes:episodeType>
      <guid isPermaLink="false">d7be1a72-5763-4cd2-b5f8-2546d149e5a1</guid>
      <link>https://pixelscientia.com/podcast/searching-for-new-therapeutics-in-nature-with-david-healey-from-enveda-biosciences/</link>
      <description>
        <![CDATA[<p>New therapeutics refer to newly developed drugs, treatments, or interventions designed to prevent, treat, or cure diseases or medical conditions. The process of discovering new therapeutics is a complex and challenging task that requires significant resources and expertise.</p><p>In today’s episode, I sit down with David Healey, the Vice President of Data Science at Enveda Biosciences, to discuss searching for new therapeutics in nature. Enveda Biosciences is a cutting-edge biotech company revolutionizing drug discovery processes using automation and machine learning. It has a unique approach involving mapping the vast unknown chemical space in nature to identify potential therapeutics. David is a data scientist with a knack for machine learning in life sciences. He has expertise in deep neural networks, computer vision, natural language, and graph models, including a solid background in drug discovery, cheminformatics, metabolomics, and experimental biology.</p><p>In our conversation, we talk about the role of machine learning in drug discovery and the importance of developing treatments. We discuss using big data for drug discovery, the challenges and opportunities of the field, the hurdles of working with mass spectrometry data, and Enveda Biosciences’s approach to research. 
Hear how Enveda Biosciences finds the best talent, why drug discovery is an exciting field, and much more.</p><p><br></p><p><strong>Key Points:</strong></p><ul><li>David’s background leading up to his role at Enveda Biosciences.</li><li>What Enveda Biosciences focuses on and their approach to drug discovery.</li><li>Learn about mass spectrometry, tandem mass spectrometry, and chromatography.</li><li>The role of machine learning in biosciences and how it is used with mass spectrometry.</li><li>How Enveda Biosciences applies machine learning differently.</li><li>He explains the challenges encountered when working with mass spectrometry data.</li><li>Find out the value of large language models and other advances in the field.</li><li>We unpack the niche nature of the work Enveda Biosciences is doing.</li><li>Overview of the different types of experts that are working at Enveda Biosciences.</li><li>David shares what recruiting approaches have been most successful for the company.</li><li>Advice that David has for other AI-powered startups.</li><li>He tells us about the impact he wants Enveda Biosciences to have in the future.</li></ul><p><br></p><p><strong>Quotes:</strong></p><p>“Fundamentally, what we are doing at Enveda is looking for active molecules in nature. 
What that involves is trying to learn what the molecules are that nature produces, and what they do.” — David Healey</p><p><br></p><p>“We are using machine learning to sort of interpret the language of the mass spectrometry, and in particular, to treat it like a natural language problem, or like a machine translation problem.” — David Healey</p><p><br></p><p>“We do a lot of work on learning better representations of the spectra so that we can compare them with each other in a way that would better approximate the actual similarity of a molecule.” — David Healey</p><p><br></p><p>“Really being deliberate about getting the best talent in the door at the very beginning, I think, is really crucial.” — David Healey</p><p><br></p><p><strong>Links:</strong></p><p><a href="https://www.linkedin.com/in/david-healey-a0a8143/">David Healey on LinkedIn</a></p><p><a href="https://www.envedabio.com">Enveda Biosciences</a></p><p><strong>Resources for Computer Vision Teams:</strong></p><p><a href="https://www.linkedin.com/in/hdcouture/">LinkedIn</a> – Connect with Heather.</p><p><a href="https://pixelscientia.com/newsletter/">Computer Vision Insights Newsletter</a> – A biweekly newsletter to help bring the latest machine learning and computer vision research to applications in people and planetary health.</p><p><a href="http://pixelscientia.com/services/strategy-session/">Computer Vision Strategy Session</a> – Not sure how to advance your computer vision project? Get unstuck with a clear set of next steps. Schedule a 1 hour strategy session now to advance your project.</p><p><a href="https://pixelscientia.com/services/advisory/">Computer Vision Advisory Services</a> – Monthly advisory services to help you strategically plan your CV/ML capabilities, reduce the trial-and-error of model development, and get to market faster.</p>]]>
      </description>
      <content:encoded>
        <![CDATA[<p>New therapeutics refer to newly developed drugs, treatments, or interventions designed to prevent, treat, or cure diseases or medical conditions. The process of discovering new therapeutics is a complex and challenging task that requires significant resources and expertise.</p><p>In today’s episode, I sit down with David Healey, the Vice President of Data Science at Enveda Biosciences, to discuss searching for new therapeutics in nature. Enveda Biosciences is a cutting-edge biotech company revolutionizing drug discovery processes using automation and machine learning. It has a unique approach involving mapping the vast unknown chemical space in nature to identify potential therapeutics. David is a data scientist with a knack for machine learning in life sciences. He has expertise in deep neural networks, computer vision, natural language, and graph models, including a solid background in drug discovery, cheminformatics, metabolomics, and experimental biology.</p><p>In our conversation, we talk about the role of machine learning in drug discovery and the importance of developing treatments. We discuss using big data for drug discovery, the challenges and opportunities of the field, the hurdles of working with mass spectrometry data, and Enveda Biosciences’s approach to research. 
Hear how Enveda Biosciences finds the best talent, why drug discovery is an exciting field, and much more.</p><p><br></p><p><strong>Key Points:</strong></p><ul><li>David’s background leading up to his role at Enveda Biosciences.</li><li>What Enveda Biosciences focuses on and their approach to drug discovery.</li><li>Learn about mass spectrometry, tandem mass spectrometry, and chromatography.</li><li>The role of machine learning in biosciences and how it is used with mass spectrometry.</li><li>How Enveda Biosciences applies machine learning differently.</li><li>He explains the challenges encountered when working with mass spectrometry data.</li><li>Find out the value of large language models and other advances in the field.</li><li>We unpack the niche nature of the work Enveda Biosciences is doing.</li><li>Overview of the different types of experts that are working at Enveda Biosciences.</li><li>David shares what recruiting approaches have been most successful for the company.</li><li>Advice that David has for other AI-powered startups.</li><li>He tells us about the impact he wants Enveda Biosciences to have in the future.</li></ul><p><br></p><p><strong>Quotes:</strong></p><p>“Fundamentally, what we are doing at Enveda is looking for active molecules in nature. 
What that involves is trying to learn what the molecules are that nature produces, and what they do.” — David Healey</p><p><br></p><p>“We are using machine learning to sort of interpret the language of the mass spectrometry, and in particular, to treat it like a natural language problem, or like a machine translation problem.” — David Healey</p><p><br></p><p>“We do a lot of work on learning better representations of the spectra so that we can compare them with each other in a way that would better approximate the actual similarity of a molecule.” — David Healey</p><p><br></p><p>“Really being deliberate about getting the best talent in the door at the very beginning, I think, is really crucial.” — David Healey</p><p><br></p><p><strong>Links:</strong></p><p><a href="https://www.linkedin.com/in/david-healey-a0a8143/">David Healey on LinkedIn</a></p><p><a href="https://www.envedabio.com">Enveda Biosciences</a></p><p><strong>Resources for Computer Vision Teams:</strong></p><p><a href="https://www.linkedin.com/in/hdcouture/">LinkedIn</a> – Connect with Heather.</p><p><a href="https://pixelscientia.com/newsletter/">Computer Vision Insights Newsletter</a> – A biweekly newsletter to help bring the latest machine learning and computer vision research to applications in people and planetary health.</p><p><a href="http://pixelscientia.com/services/strategy-session/">Computer Vision Strategy Session</a> – Not sure how to advance your computer vision project? Get unstuck with a clear set of next steps. Schedule a 1 hour strategy session now to advance your project.</p><p><a href="https://pixelscientia.com/services/advisory/">Computer Vision Advisory Services</a> – Monthly advisory services to help you strategically plan your CV/ML capabilities, reduce the trial-and-error of model development, and get to market faster.</p>]]>
      </content:encoded>
      <pubDate>Mon, 01 May 2023 06:00:00 -0400</pubDate>
      <author>Heather D. Couture</author>
      <enclosure url="https://2.gum.fm/op3.dev/e/pdcn.co/e/pdst.fm/e/dts.podtrac.com/redirect.mp3/media.transistor.fm/b2dbf80e/8a1faacf.mp3" length="42587509" type="audio/mpeg"/>
      <itunes:author>Heather D. Couture</itunes:author>
      <itunes:image href="https://img.transistor.fm/yYcujoVsYI41WPPTleN9dVUymBfy6DczBXovjrII0b0/rs:fill:0:0:1/w:1400/h:1400/q:60/mb:500000/aHR0cHM6Ly9pbWct/dXBsb2FkLXByb2R1/Y3Rpb24udHJhbnNp/c3Rvci5mbS9lcGlz/b2RlLzEyODYyNzcv/MTY4MTMxNzA2Ny1h/cnR3b3JrLmpwZw.jpg"/>
      <itunes:duration>1768</itunes:duration>
      <itunes:summary>
        <![CDATA[<p>New therapeutics refer to newly developed drugs, treatments, or interventions designed to prevent, treat, or cure diseases or medical conditions. The process of discovering new therapeutics is a complex and challenging task that requires significant resources and expertise.</p><p>In today’s episode, I sit down with David Healey, the Vice President of Data Science at Enveda Biosciences, to discuss searching for new therapeutics in nature. Enveda Biosciences is a cutting-edge biotech company revolutionizing drug discovery processes using automation and machine learning. It has a unique approach involving mapping the vast unknown chemical space in nature to identify potential therapeutics. David is a data scientist with a knack for machine learning in life sciences. He has expertise in deep neural networks, computer vision, natural language, and graph models, including a solid background in drug discovery, cheminformatics, metabolomics, and experimental biology.</p><p>In our conversation, we talk about the role of machine learning in drug discovery and the importance of developing treatments. We discuss using big data for drug discovery, the challenges and opportunities of the field, the hurdles of working with mass spectrometry data, and Enveda Biosciences’s approach to research. 
Hear how Enveda Biosciences finds the best talent, why drug discovery is an exciting field, and much more.</p><p><br></p><p><strong>Key Points:</strong></p><ul><li>David’s background leading up to his role at Enveda Biosciences.</li><li>What Enveda Biosciences focuses on and their approach to drug discovery.</li><li>Learn about mass spectrometry, tandem mass spectrometry, and chromatography.</li><li>The role of machine learning in biosciences and how it is used with mass spectrometry.</li><li>How Enveda Biosciences applies machine learning differently.</li><li>He explains the challenges encountered when working with mass spectrometry data.</li><li>Find out the value of large language models and other advances in the field.</li><li>We unpack the niche nature of the work Enveda Biosciences is doing.</li><li>Overview of the different types of experts that are working at Enveda Biosciences.</li><li>David shares what recruiting approaches have been most successful for the company.</li><li>Advice that David has for other AI-powered startups.</li><li>He tells us about the impact he wants Enveda Biosciences to have in the future.</li></ul><p><br></p><p><strong>Quotes:</strong></p><p>“Fundamentally, what we are doing at Enveda is looking for active molecules in nature. 
What that involves is trying to learn what the molecules are that nature produces, and what they do.” — David Healey</p><p><br></p><p>“We are using machine learning to sort of interpret the language of the mass spectrometry, and in particular, to treat it like a natural language problem, or like a machine translation problem.” — David Healey</p><p><br></p><p>“We do a lot of work on learning better representations of the spectra so that we can compare them with each other in a way that would better approximate the actual similarity of a molecule.” — David Healey</p><p><br></p><p>“Really being deliberate about getting the best talent in the door at the very beginning, I think, is really crucial.” — David Healey</p><p><br></p><p><strong>Links:</strong></p><p><a href="https://www.linkedin.com/in/david-healey-a0a8143/">David Healey on LinkedIn</a></p><p><a href="https://www.envedabio.com">Enveda Biosciences</a></p><p><strong>Resources for Computer Vision Teams:</strong></p><p><a href="https://www.linkedin.com/in/hdcouture/">LinkedIn</a> – Connect with Heather.</p><p><a href="https://pixelscientia.com/newsletter/">Computer Vision Insights Newsletter</a> – A biweekly newsletter to help bring the latest machine learning and computer vision research to applications in people and planetary health.</p><p><a href="http://pixelscientia.com/services/strategy-session/">Computer Vision Strategy Session</a> – Not sure how to advance your computer vision project? Get unstuck with a clear set of next steps. Schedule a 1 hour strategy session now to advance your project.</p><p><a href="https://pixelscientia.com/services/advisory/">Computer Vision Advisory Services</a> – Monthly advisory services to help you strategically plan your CV/ML capabilities, reduce the trial-and-error of model development, and get to market faster.</p>]]>
      </itunes:summary>
      <itunes:keywords>AI, startups, business, machine learning, computer vision, deep learning, impact, sustainability, healthcare, medical</itunes:keywords>
      <itunes:explicit>No</itunes:explicit>
      <podcast:transcript url="https://share.transistor.fm/s/b2dbf80e/transcript.txt" type="text/plain"/>
    </item>
    <item>
      <title>Trustworthy AI with Yiannis Kanellopoulos from Code4Thought</title>
      <itunes:episode>29</itunes:episode>
      <podcast:episode>29</podcast:episode>
      <itunes:title>Trustworthy AI with Yiannis Kanellopoulos from Code4Thought</itunes:title>
      <itunes:episodeType>full</itunes:episodeType>
      <guid isPermaLink="false">ec53f0ca-71e5-4e31-8bf5-d073c2c6386c</guid>
      <link>https://pixelscientia.com/podcast/trustworthy-ai-with-yiannis-kanellopoulos-from-code4thought/</link>
      <description>
        <![CDATA[<p>The demand for trustworthy AI is increasing across all sectors. Today’s guest is committed to mitigating biases of AI models and ensuring the responsible use of AI technology. Yiannis Kanellopoulos is the Founder and CEO of Code4Thought, the state-of-the-art AI audit solution for trustworthy AI.</p><p>In this episode, we discuss what it means for AI to be trustworthy and Yiannis explains the process by which Code4Thought evaluates the trustworthiness of AI models. We discover how biases manifest and how best to mitigate them, as well as the role of explainability in evaluating the trustworthiness of a model. Tune in to hear Yiannis’ advice on what to consider when developing a model and why the trustworthiness of your business solution should never be an afterthought.</p><p><br></p><p><strong>Key Points:</strong></p><ul><li>Yiannis Kanellopoulos’ background; how it led him to create Code4Thought.</li><li>What Code4Thought does and why it’s important for the future of AI.</li><li>What it means for AI to be trustworthy.</li><li>How Code4Thought evaluates the trustworthiness of AI models.</li><li>Yiannis shares a use case evaluation in the healthcare sphere.</li><li>Why Code4Thought’s independent perspective is so important.</li><li>Yiannis explains how biases manifest in AI technology and shares mitigation strategies.</li><li>The role explainability plays in evaluating the trustworthiness of a model.</li><li>Why explainability is particularly important for financial services.</li><li>Simultaneously optimizing accuracy and explainability.</li><li>What to consider when developing a model.</li><li>The increasing demand for trustworthy AI in various sectors.</li><li>Yiannis’ advice for other leaders of AI startups.</li><li>His vision for Code4Thought in the next three to five years.</li></ul><p><br></p><p><strong>Quotes:</strong></p><p>“We are building technology for testing and auditing AI systems.” — Yiannis Kanellopoulos</p><p><br></p><p>“The team 
that produces an AI system [is] tasked to solve a business problem. They're optimizing for solving this problem. They're not being optimized to test the model adequately [and] ensure that the model is working properly and can be trusted.” — Yiannis Kanellopoulos</p><p><br></p><p>“One can say that by performing an explainability analysis, you can use it to essentially debug the way your model works.” — Yiannis Kanellopoulos</p><p><br></p><p>“Don’t try to optimize only the business problem. The quality of your solution [and] the trustworthiness of the solution, should not be an afterthought.” — Yiannis Kanellopoulos</p><p><br></p><p><strong>Links:</strong></p><p><a href="https://www.linkedin.com/in/ykanellopoulos">Yiannis Kanellopoulos on LinkedIn</a></p><p><a href="https://twitter.com/ykanellopoulos">Yiannis Kanellopoulos on Twitter</a></p><p><a href="https://code4thought.eu/">Code4Thought</a></p><p><a href="https://www.youtube.com/channel/UCP3oKs3SLXJTBdBlps27tmw">Code4Thought on YouTube</a> </p><p><a href="https://www.linkedin.com/company/code4thought/">Code4Thought on LinkedIn</a></p><p><a href="https://twitter.com/Code4thoughtE">Code4Thought on Twitter</a></p><p><strong>Resources for Computer Vision Teams:</strong></p><p><a href="https://www.linkedin.com/in/hdcouture/">LinkedIn</a> – Connect with Heather.</p><p><a href="https://pixelscientia.com/newsletter/">Computer Vision Insights Newsletter</a> – A biweekly newsletter to help bring the latest machine learning and computer vision research to applications in people and planetary health.</p><p><a href="http://pixelscientia.com/services/strategy-session/">Computer Vision Strategy Session</a> – Not sure how to advance your computer vision project? Get unstuck with a clear set of next steps. 
Schedule a 1 hour strategy session now to advance your project.</p><p><a href="https://pixelscientia.com/services/advisory/">Computer Vision Advisory Services</a> – Monthly advisory services to help you strategically plan your CV/ML capabilities, reduce the trial-and-error of model development, and get to market faster.</p>]]>
      </description>
      <content:encoded>
        <![CDATA[<p>The demand for trustworthy AI is increasing across all sectors. Today’s guest is committed to mitigating biases of AI models and ensuring the responsible use of AI technology. Yiannis Kanellopoulos is the Founder and CEO of Code4Thought, the state-of-the-art AI audit solution for trustworthy AI.</p><p>In this episode, we discuss what it means for AI to be trustworthy and Yiannis explains the process by which Code4Thought evaluates the trustworthiness of AI models. We discover how biases manifest and how best to mitigate them, as well as the role of explainability in evaluating the trustworthiness of a model. Tune in to hear Yiannis’ advice on what to consider when developing a model and why the trustworthiness of your business solution should never be an afterthought.</p><p><br></p><p><strong>Key Points:</strong></p><ul><li>Yiannis Kanellopoulos’ background; how it led him to create Code4Thought.</li><li>What Code4Thought does and why it’s important for the future of AI.</li><li>What it means for AI to be trustworthy.</li><li>How Code4Thought evaluates the trustworthiness of AI models.</li><li>Yiannis shares a use case evaluation in the healthcare sphere.</li><li>Why Code4Thought’s independent perspective is so important.</li><li>Yiannis explains how biases manifest in AI technology and shares mitigation strategies.</li><li>The role explainability plays in evaluating the trustworthiness of a model.</li><li>Why explainability is particularly important for financial services.</li><li>Simultaneously optimizing accuracy and explainability.</li><li>What to consider when developing a model.</li><li>The increasing demand for trustworthy AI in various sectors.</li><li>Yiannis’ advice for other leaders of AI startups.</li><li>His vision for Code4Thought in the next three to five years.</li></ul><p><br></p><p><strong>Quotes:</strong></p><p>“We are building technology for testing and auditing AI systems.” — Yiannis Kanellopoulos</p><p><br></p><p>“The team 
that produces an AI system [is] tasked to solve a business problem. They're optimizing for solving this problem. They're not being optimized to test the model adequately [and] ensure that the model is working properly and can be trusted.” — Yiannis Kanellopoulos</p><p><br></p><p>“One can say that by performing an explainability analysis, you can use it to essentially debug the way your model works.” — Yiannis Kanellopoulos</p><p><br></p><p>“Don’t try to optimize only the business problem. The quality of your solution [and] the trustworthiness of the solution, should not be an afterthought.” — Yiannis Kanellopoulos</p><p><br></p><p><strong>Links:</strong></p><p><a href="https://www.linkedin.com/in/ykanellopoulos">Yiannis Kanellopoulos on LinkedIn</a></p><p><a href="https://twitter.com/ykanellopoulos">Yiannis Kanellopoulos on Twitter</a></p><p><a href="https://code4thought.eu/">Code4Thought</a></p><p><a href="https://www.youtube.com/channel/UCP3oKs3SLXJTBdBlps27tmw">Code4Thought on YouTube</a> </p><p><a href="https://www.linkedin.com/company/code4thought/">Code4Thought on LinkedIn</a></p><p><a href="https://twitter.com/Code4thoughtE">Code4Thought on Twitter</a></p><p><strong>Resources for Computer Vision Teams:</strong></p><p><a href="https://www.linkedin.com/in/hdcouture/">LinkedIn</a> – Connect with Heather.</p><p><a href="https://pixelscientia.com/newsletter/">Computer Vision Insights Newsletter</a> – A biweekly newsletter to help bring the latest machine learning and computer vision research to applications in people and planetary health.</p><p><a href="http://pixelscientia.com/services/strategy-session/">Computer Vision Strategy Session</a> – Not sure how to advance your computer vision project? Get unstuck with a clear set of next steps. 
Schedule a 1 hour strategy session now to advance your project.</p><p><a href="https://pixelscientia.com/services/advisory/">Computer Vision Advisory Services</a> – Monthly advisory services to help you strategically plan your CV/ML capabilities, reduce the trial-and-error of model development, and get to market faster.</p>]]>
      </content:encoded>
      <pubDate>Mon, 24 Apr 2023 06:00:00 -0400</pubDate>
      <author>Heather D. Couture</author>
      <enclosure url="https://2.gum.fm/op3.dev/e/pdcn.co/e/pdst.fm/e/dts.podtrac.com/redirect.mp3/media.transistor.fm/70934bcc/4044662b.mp3" length="34538677" type="audio/mpeg"/>
      <itunes:author>Heather D. Couture</itunes:author>
      <itunes:image href="https://img.transistor.fm/OZPayDVcl9BghG7WzPv41KcW0ZVNApmXyGKpy9WbBg0/rs:fill:0:0:1/w:1400/h:1400/q:60/mb:500000/aHR0cHM6Ly9pbWct/dXBsb2FkLXByb2R1/Y3Rpb24udHJhbnNp/c3Rvci5mbS9lcGlz/b2RlLzEyODYyNTEv/MTY4MTMxNjg0My1h/cnR3b3JrLmpwZw.jpg"/>
      <itunes:duration>1435</itunes:duration>
      <itunes:summary>
        <![CDATA[<p>The demand for trustworthy AI is increasing across all sectors. Today’s guest is committed to mitigating biases of AI models and ensuring the responsible use of AI technology. Yiannis Kanellopoulos is the Founder and CEO of Code4Thought, the state-of-the-art AI audit solution for trustworthy AI.</p><p>In this episode, we discuss what it means for AI to be trustworthy and Yiannis explains the process by which Code4Thought evaluates the trustworthiness of AI models. We discover how biases manifest and how best to mitigate them, as well as the role of explainability in evaluating the trustworthiness of a model. Tune in to hear Yiannis’ advice on what to consider when developing a model and why the trustworthiness of your business solution should never be an afterthought.</p><p><br></p><p><strong>Key Points:</strong></p><ul><li>Yiannis Kanellopoulos’ background; how it led him to create Code4Thought.</li><li>What Code4Thought does and why it’s important for the future of AI.</li><li>What it means for AI to be trustworthy.</li><li>How Code4Thought evaluates the trustworthiness of AI models.</li><li>Yiannis shares a use case evaluation in the healthcare sphere.</li><li>Why Code4Thought’s independent perspective is so important.</li><li>Yiannis explains how biases manifest in AI technology and shares mitigation strategies.</li><li>The role explainability plays in evaluating the trustworthiness of a model.</li><li>Why explainability is particularly important for financial services.</li><li>Simultaneously optimizing accuracy and explainability.</li><li>What to consider when developing a model.</li><li>The increasing demand for trustworthy AI in various sectors.</li><li>Yiannis’ advice for other leaders of AI startups.</li><li>His vision for Code4Thought in the next three to five years.</li></ul><p><br></p><p><strong>Quotes:</strong></p><p>“We are building technology for testing and auditing AI systems.” — Yiannis Kanellopoulos</p><p><br></p><p>“The team 
that produces an AI system [is] tasked to solve a business problem. They're optimizing for solving this problem. They're not being optimized to test the model adequately [and] ensure that the model is working properly and can be trusted.” — Yiannis Kanellopoulos</p><p><br></p><p>“One can say that by performing an explainability analysis, you can use it to essentially debug the way your model works.” — Yiannis Kanellopoulos</p><p><br></p><p>“Don’t try to optimize only the business problem. The quality of your solution [and] the trustworthiness of the solution, should not be an afterthought.” — Yiannis Kanellopoulos</p><p><br></p><p><strong>Links:</strong></p><p><a href="https://www.linkedin.com/in/ykanellopoulos">Yiannis Kanellopoulos on LinkedIn</a></p><p><a href="https://twitter.com/ykanellopoulos">Yiannis Kanellopoulos on Twitter</a></p><p><a href="https://code4thought.eu/">Code4Thought</a></p><p><a href="https://www.youtube.com/channel/UCP3oKs3SLXJTBdBlps27tmw">Code4Thought on YouTube</a> </p><p><a href="https://www.linkedin.com/company/code4thought/">Code4Thought on LinkedIn</a></p><p><a href="https://twitter.com/Code4thoughtE">Code4Thought on Twitter</a></p><p><strong>Resources for Computer Vision Teams:</strong></p><p><a href="https://www.linkedin.com/in/hdcouture/">LinkedIn</a> – Connect with Heather.</p><p><a href="https://pixelscientia.com/newsletter/">Computer Vision Insights Newsletter</a> – A biweekly newsletter to help bring the latest machine learning and computer vision research to applications in people and planetary health.</p><p><a href="http://pixelscientia.com/services/strategy-session/">Computer Vision Strategy Session</a> – Not sure how to advance your computer vision project? Get unstuck with a clear set of next steps. 
Schedule a 1 hour strategy session now to advance your project.</p><p><a href="https://pixelscientia.com/services/advisory/">Computer Vision Advisory Services</a> – Monthly advisory services to help you strategically plan your CV/ML capabilities, reduce the trial-and-error of model development, and get to market faster.</p>]]>
      </itunes:summary>
      <itunes:keywords>AI, startups, business, machine learning, computer vision, deep learning, impact, sustainability, healthcare, medical</itunes:keywords>
      <itunes:explicit>No</itunes:explicit>
      <podcast:transcript url="https://share.transistor.fm/s/70934bcc/transcript.txt" type="text/plain"/>
    </item>
    <item>
      <title>Climate Risk Analysis with Josh Hacker from Jupiter Intelligence</title>
      <itunes:episode>28</itunes:episode>
      <podcast:episode>28</podcast:episode>
      <itunes:title>Climate Risk Analysis with Josh Hacker from Jupiter Intelligence</itunes:title>
      <itunes:episodeType>full</itunes:episodeType>
      <guid isPermaLink="false">6c09b59b-641e-424b-819b-d3f250cf01c2</guid>
      <link>https://pixelscientia.com/podcast/climate-risk-analysis-with-josh-hacker-from-jupiter-intelligence/</link>
      <description>
        <![CDATA[<p>Climate change poses significant risks and uncertainties, with far-reaching impacts on business operations, supply chains, and financial performance. By conducting a climate risk analysis, businesses can mitigate risks, develop savvy strategies, and masterfully manage and mitigate them.</p><p>Joining me today is Josh Hacker, an atmospheric scientist whose career has spanned diverse research and science management roles. Josh is also the Co-Founder and Chief Science Officer at Jupiter Intelligence, the go-to expert for organizations seeking to strengthen their climate resilience through climate risk analytics. Josh has made his mark in both academic and laboratory settings and is helping to meet private sector demands for comprehensive and accurate information on the costs of climate change for individual companies and market sectors.</p><p>In our conversation, we discuss why climate change is relevant for companies, what they can do about it, and how Jupiter Intelligence is leading the way. We unpack the various types of climate risks, the role of machine learning, and the validation process. Learn about the various uncertainties and errors in modeling, how to correct them, the role reanalysis plays, why a multidisciplinary team of experts is essential, and more.</p><p><br></p><p><strong>Key Points:</strong></p><ul><li>Background about Josh and why he decided to start Jupiter Intelligence.</li><li>The work Jupiter Intelligence does and how it relates to climate change adaptation.</li><li>Why climate risks are being taken seriously by the private sector.</li><li>Discover the role of machine learning tools in assessing climate risks.</li><li>An outline of the various challenges faced when working with climate models.</li><li>Learn about his approach to model validation and why it is crucial. 
</li><li>Keeping a balance between model accuracy and explainability.</li><li>Find out how model uncertainty and model errors are quantified.</li><li>How bias manifests in climate models, and how to identify it.</li><li>Josh shares advice for other leaders of AI-powered startups.</li><li>What to expect from Jupiter Intelligence in the future.</li></ul><p><br></p><p><strong>Quotes:</strong></p><p>“The vast majority of capital that we're using, and we will be using to adapt to climate change, is locked up in the private sector. But on the other hand, the government has a role in policy. These two things have an interplay that then feeds into the broader community.” — Josh Hacker</p><p><br></p><p>“The reality is there's no one way. [Validation] is a complicated process that you have to build on to make sure that things are working right all along the way.” — Josh Hacker</p><p><br></p><p>“The game in climate modeling is to actually pull the signal out from that noise. We want to pull out the slow stuff, how the climate changes, how the climate is changing relative to all the weather patterns that are going on underneath it.” — Josh Hacker</p><p><br></p><p>“Because of that historical period and the existence of these reanalyses, we have something we can do to correct the historical statistics of the climate models.” — Josh Hacker</p><p><br></p><p><strong>Links:</strong></p><p><a href="https://www.linkedin.com/in/josh-hacker-5a4644b4/">Josh Hacker on LinkedIn</a></p><p><a href="https://www.jupiterintel.com">Jupiter Intelligence</a></p><p><strong>Resources for Computer Vision Teams:</strong></p><p><a href="https://www.linkedin.com/in/hdcouture/">LinkedIn</a> – Connect with Heather.</p><p><a href="https://pixelscientia.com/newsletter/">Computer Vision Insights Newsletter</a> – A biweekly newsletter to help bring the latest machine learning and computer vision research to applications in people and planetary health.</p><p><a 
href="http://pixelscientia.com/services/strategy-session/">Computer Vision Strategy Session</a> – Not sure how to advance your computer vision project? Get unstuck with a clear set of next steps. Schedule a 1 hour strategy session now to advance your project.</p><p><a href="https://pixelscientia.com/services/advisory/">Computer Vision Advisory Services</a> – Monthly advisory services to help you strategically plan your CV/ML capabilities, reduce the trial-and-error of model development, and get to market faster.</p>]]>
      </description>
      <content:encoded>
        <![CDATA[<p>Climate change poses significant risks and uncertainties, with far-reaching impacts on business operations, supply chains, and financial performance. By conducting a climate risk analysis, businesses can mitigate risks, develop savvy strategies, and masterfully manage and mitigate them.</p><p>Joining me today is Josh Hacker, an atmospheric scientist whose career has spanned diverse research and science management roles. Josh is also the Co-Founder and Chief Science Officer at Jupiter Intelligence, the go-to expert for organizations seeking to strengthen their climate resilience through climate risk analytics. Josh has made his mark in both academic and laboratory settings and is helping to meet private sector demands for comprehensive and accurate information on the costs of climate change for individual companies and market sectors.</p><p>In our conversation, we discuss why climate change is relevant for companies, what they can do about it, and how Jupiter Intelligence is leading the way. We unpack the various types of climate risks, the role of machine learning, and the validation process. Learn about the various uncertainties and errors in modeling, how to correct them, the role reanalysis plays, why a multidisciplinary team of experts is essential, and more.</p><p><br></p><p><strong>Key Points:</strong></p><ul><li>Background about Josh and why he decided to start Jupiter Intelligence.</li><li>The work Jupiter Intelligence does and how it relates to climate change adaptation.</li><li>Why climate risks are being taken seriously by the private sector.</li><li>Discover the role of machine learning tools in assessing climate risks.</li><li>An outline of the various challenges faced when working with climate models.</li><li>Learn about his approach to model validation and why it is crucial. 
</li><li>Keeping a balance between model accuracy and explainability.</li><li>Find out how model uncertainty and model errors are quantified.</li><li>How bias manifests in climate models, and how to identify it.</li><li>Josh shares advice for other leaders of AI-powered startups.</li><li>What to expect from Jupiter Intelligence in the future.</li></ul><p><br></p><p><strong>Quotes:</strong></p><p>“The vast majority of capital that we're using, and we will be using to adapt to climate change, is locked up in the private sector. But on the other hand, the government has a role in policy. These two things have an interplay that then feeds into the broader community.” — Josh Hacker</p><p><br></p><p>“The reality is there's no one way. [Validation] is a complicated process that you have to build on to make sure that things are working right all along the way.” — Josh Hacker</p><p><br></p><p>“The game in climate modeling is to actually pull the signal out from that noise. We want to pull out the slow stuff, how the climate changes, how the climate is changing relative to all the weather patterns that are going on underneath it.” — Josh Hacker</p><p><br></p><p>“Because of that historical period and the existence of these reanalyses, we have something we can do to correct the historical statistics of the climate models.” — Josh Hacker</p><p><br></p><p><strong>Links:</strong></p><p><a href="https://www.linkedin.com/in/josh-hacker-5a4644b4/">Josh Hacker on LinkedIn</a></p><p><a href="https://www.jupiterintel.com">Jupiter Intelligence</a></p><p><strong>Resources for Computer Vision Teams:</strong></p><p><a href="https://www.linkedin.com/in/hdcouture/">LinkedIn</a> – Connect with Heather.</p><p><a href="https://pixelscientia.com/newsletter/">Computer Vision Insights Newsletter</a> – A biweekly newsletter to help bring the latest machine learning and computer vision research to applications in people and planetary health.</p><p><a 
href="http://pixelscientia.com/services/strategy-session/">Computer Vision Strategy Session</a> – Not sure how to advance your computer vision project? Get unstuck with a clear set of next steps. Schedule a 1 hour strategy session now to advance your project.</p><p><a href="https://pixelscientia.com/services/advisory/">Computer Vision Advisory Services</a> – Monthly advisory services to help you strategically plan your CV/ML capabilities, reduce the trial-and-error of model development, and get to market faster.</p>]]>
      </content:encoded>
      <pubDate>Mon, 17 Apr 2023 06:00:00 -0400</pubDate>
      <author>Heather D. Couture</author>
      <enclosure url="https://2.gum.fm/op3.dev/e/pdcn.co/e/pdst.fm/e/dts.podtrac.com/redirect.mp3/media.transistor.fm/27ce9a78/0a4de031.mp3" length="39986244" type="audio/mpeg"/>
      <itunes:author>Heather D. Couture</itunes:author>
      <itunes:image href="https://img.transistor.fm/ZJ_v0dNn5Jcc-hOc7WXKJ-1jc1k3bLnnegmEgzm1Tco/rs:fill:0:0:1/w:1400/h:1400/q:60/mb:500000/aHR0cHM6Ly9pbWct/dXBsb2FkLXByb2R1/Y3Rpb24udHJhbnNp/c3Rvci5mbS9lcGlz/b2RlLzEyODYyMjUv/MTY4MTM0NzYxOS1h/cnR3b3JrLmpwZw.jpg"/>
      <itunes:duration>1661</itunes:duration>
      <itunes:summary>
        <![CDATA[<p>Climate change poses significant risks and uncertainties, with far-reaching impacts on business operations, supply chains, and financial performance. By conducting a climate risk analysis, businesses can mitigate risks, develop savvy strategies, and masterfully manage and mitigate them.</p><p>Joining me today is Josh Hacker, an atmospheric scientist whose career has spanned diverse research and science management roles. Josh is also the Co-Founder and Chief Science Officer at Jupiter Intelligence, the go-to expert for organizations seeking to strengthen their climate resilience through climate risk analytics. Josh has made his mark in both academic and laboratory settings and is helping to meet private sector demands for comprehensive and accurate information on the costs of climate change for individual companies and market sectors.</p><p>In our conversation, we discuss why climate change is relevant for companies, what they can do about it, and how Jupiter Intelligence is leading the way. We unpack the various types of climate risks, the role of machine learning, and the validation process. Learn about the various uncertainties and errors in modeling, how to correct them, the role reanalysis plays, why a multidisciplinary team of experts is essential, and more.</p><p><br></p><p><strong>Key Points:</strong></p><ul><li>Background about Josh and why he decided to start Jupiter Intelligence.</li><li>The work Jupiter Intelligence does and how it relates to climate change adaptation.</li><li>Why climate risks are being taken seriously by the private sector.</li><li>Discover the role of machine learning tools in assessing climate risks.</li><li>An outline of the various challenges faced when working with climate models.</li><li>Learn about his approach to model validation and why it is crucial. 
</li><li>Keeping a balance between model accuracy and explainability.</li><li>Find out how model uncertainty and model errors are quantified.</li><li>How bias manifests in climate models, and how to identify it.</li><li>Josh shares advice for other leaders of AI-powered startups.</li><li>What to expect from Jupiter Intelligence in the future.</li></ul><p><br></p><p><strong>Quotes:</strong></p><p>“The vast majority of capital that we're using, and we will be using to adapt to climate change, is locked up in the private sector. But on the other hand, the government has a role in policy. These two things have an interplay that then feeds into the broader community.” — Josh Hacker</p><p><br></p><p>“The reality is there's no one way. [Validation] is a complicated process that you have to build on to make sure that things are working right all along the way.” — Josh Hacker</p><p><br></p><p>“The game in climate modeling is to actually pull the signal out from that noise. We want to pull out the slow stuff, how the climate changes, how the climate is changing relative to all the weather patterns that are going on underneath it.” — Josh Hacker</p><p><br></p><p>“Because of that historical period and the existence of these reanalyses, we have something we can do to correct the historical statistics of the climate models.” — Josh Hacker</p><p><br></p><p><strong>Links:</strong></p><p><a href="https://www.linkedin.com/in/josh-hacker-5a4644b4/">Josh Hacker on LinkedIn</a></p><p><a href="https://www.jupiterintel.com">Jupiter Intelligence</a></p><p><strong>Resources for Computer Vision Teams:</strong></p><p><a href="https://www.linkedin.com/in/hdcouture/">LinkedIn</a> – Connect with Heather.</p><p><a href="https://pixelscientia.com/newsletter/">Computer Vision Insights Newsletter</a> – A biweekly newsletter to help bring the latest machine learning and computer vision research to applications in people and planetary health.</p><p><a 
href="http://pixelscientia.com/services/strategy-session/">Computer Vision Strategy Session</a> – Not sure how to advance your computer vision project? Get unstuck with a clear set of next steps. Schedule a 1 hour strategy session now to advance your project.</p><p><a href="https://pixelscientia.com/services/advisory/">Computer Vision Advisory Services</a> – Monthly advisory services to help you strategically plan your CV/ML capabilities, reduce the trial-and-error of model development, and get to market faster.</p>]]>
      </itunes:summary>
      <itunes:keywords>AI, startups, business, machine learning, computer vision, deep learning, impact, sustainability, healthcare, medical</itunes:keywords>
      <itunes:explicit>No</itunes:explicit>
      <podcast:transcript url="https://share.transistor.fm/s/27ce9a78/transcript.txt" type="text/plain"/>
    </item>
    <item>
      <title>Advancing Treatments for Lung Diseases with Eva van Rikxoort from Thirona</title>
      <itunes:episode>27</itunes:episode>
      <podcast:episode>27</podcast:episode>
      <itunes:title>Advancing Treatments for Lung Diseases with Eva van Rikxoort from Thirona</itunes:title>
      <itunes:episodeType>full</itunes:episodeType>
      <guid isPermaLink="false">40b40366-cfc9-45b5-8d8f-b5ab3fcdaa80</guid>
      <link>https://pixelscientia.com/podcast/advancing-treatments-for-lung-diseases-with-eva-van-rikxoort-from-thirona/</link>
      <description>
        <![CDATA[<p>Today’s guest uses AI for the personalized treatment of early-stage lung cancers and other lung diseases that need localized treatment. Eva van Rikxoort, a scientist in medical image analysis and the CEO and Founder of Thirona, started studying AI 20 years ago. Her interest in lung imaging and the translation of it with the help of AI led her to found her company which develops medical image analysis software based on deep learning.</p><p>In this episode, she explains more about what Thirona does and the challenges they encounter when working with CT images. You’ll learn about the importance of online learning components for the future of AI applications for medical purposes and how the team at Thirona ensures that the technology it develops provides the right assistance to doctors, patients, and researchers. Tune in to find out more about the role of AI in the future of personalized medicine and lung disease treatments.</p><p><br></p><p><strong>Key Points:</strong></p><ul><li>An introduction to Eva van Rikxoort, her experience in AI, and how she founded Thirona.</li><li>What Thirona does and why this is important for treating disease.  </li><li>How the company uses machine learning in multiple ways for different models to predict various outputs. </li><li>The types of challenges Thirona encounters when working with CT images.</li><li>How they deal with situations where clinicians disagree or annotations are not reliable. </li><li>How the regulatory process affects the way Thirona develops machine learning models.</li><li>The importance of online learning components for the future of AI applications for medical purposes. </li><li>How Thirona ensures that the technology it develops provides the right assistance to doctors, patients, and researchers. 
</li><li>Approaches to recruiting and onboarding that have been most successful for Eva’s team.</li><li>Eva’s advice to other leaders of AI-powered startups about trusting your gut and asking for help. </li><li>How Eva foresees the impact of Thirona in the future in terms of personalized medicine and lung disease treatments.</li></ul><p><strong>Quotes:</strong></p><p>“[At Thirona] we don't make software that's aimed at radiology, even though medical image analysis is very much a radiology thing, but we really focus on breakthrough treatments that are being developed both by pharma companies but also by biotech companies.” — Eva van Rikxoort</p><p><br></p><p>“If you look technically at AI, we could be learning every single day from what we do. I mean, we as [people] do, but our AI models learn in release cycles instead of on a daily basis.” — Eva van Rikxoort</p><p><br></p><p>“Next to making something that's highly innovative, you’re also opening a market to this innovation. It's a twofold thing.” — Eva van Rikxoort</p><p><br></p><p>“Don't be afraid to ask anyone for help. For example, opinion leaders, doctors, they are very, very happy to help. 
They really love the innovations in their field.” — Eva van Rikxoort</p><p><br></p><p>“I really think the impact of Thirona will be the personalized medicine, the personalized treatment of early-stage lung cancers or any other lung disease that needs localized treatment.” — Eva van Rikxoort</p><p><br></p><p><strong>Links:</strong></p><p><a href="https://www.linkedin.com/in/evavanrikxoort/?originalSubdomain=nl">Eva van Rikxoort</a></p><p><a href="https://thirona.eu/">Thirona</a></p><p><strong>Resources for Computer Vision Teams:</strong></p><p><a href="https://www.linkedin.com/in/hdcouture/">LinkedIn</a> – Connect with Heather.</p><p><a href="https://pixelscientia.com/newsletter/">Computer Vision Insights Newsletter</a> – A biweekly newsletter to help bring the latest machine learning and computer vision research to applications in people and planetary health.</p><p><a href="http://pixelscientia.com/services/strategy-session/">Computer Vision Strategy Session</a> – Not sure how to advance your computer vision project? Get unstuck with a clear set of next steps. Schedule a 1 hour strategy session now to advance your project.</p><p><a href="https://pixelscientia.com/services/advisory/">Computer Vision Advisory Services</a> – Monthly advisory services to help you strategically plan your CV/ML capabilities, reduce the trial-and-error of model development, and get to market faster.</p>]]>
      </description>
      <content:encoded>
        <![CDATA[<p>Today’s guest uses AI for the personalized treatment of early-stage lung cancers and other lung diseases that need localized treatment. Eva van Rikxoort, a scientist in medical image analysis and the CEO and Founder of Thirona, started studying AI 20 years ago. Her interest in lung imaging and the translation of it with the help of AI led her to found her company which develops medical image analysis software based on deep learning.</p><p>In this episode, she explains more about what Thirona does and the challenges they encounter when working with CT images. You’ll learn about the importance of online learning components for the future of AI applications for medical purposes and how the team at Thirona ensures that the technology it develops provides the right assistance to doctors, patients, and researchers. Tune in to find out more about the role of AI in the future of personalized medicine and lung disease treatments.</p><p><br></p><p><strong>Key Points:</strong></p><ul><li>An introduction to Eva van Rikxoort, her experience in AI, and how she founded Thirona.</li><li>What Thirona does and why this is important for treating disease.  </li><li>How the company uses machine learning in multiple ways for different models to predict various outputs. </li><li>The types of challenges Thirona encounters when working with CT images.</li><li>How they deal with situations where clinicians disagree or annotations are not reliable. </li><li>How the regulatory process affects the way Thirona develops machine learning models.</li><li>The importance of online learning components for the future of AI applications for medical purposes. </li><li>How Thirona ensures that the technology it develops provides the right assistance to doctors, patients, and researchers. 
</li><li>Approaches to recruiting and onboarding that have been most successful for Eva’s team.</li><li>Eva’s advice to other leaders of AI-powered startups about trusting your gut and asking for help. </li><li>How Eva foresees the impact of Thirona in the future in terms of personalized medicine and lung disease treatments.</li></ul><p><strong>Quotes:</strong></p><p>“[At Thirona] we don't make software that's aimed at radiology, even though medical image analysis is very much a radiology thing, but we really focus on breakthrough treatments that are being developed both by pharma companies but also by biotech companies.” — Eva van Rikxoort</p><p><br></p><p>“If you look technically at AI, we could be learning every single day from what we do. I mean, we as [people] do, but our AI models learn in release cycles instead of on a daily basis.” — Eva van Rikxoort</p><p><br></p><p>“Next to making something that's highly innovative, you’re also opening a market to this innovation. It's a twofold thing.” — Eva van Rikxoort</p><p><br></p><p>“Don't be afraid to ask anyone for help. For example, opinion leaders, doctors, they are very, very happy to help. 
They really love the innovations in their field.” — Eva van Rikxoort</p><p><br></p><p>“I really think the impact of Thirona will be the personalized medicine, the personalized treatment of early-stage lung cancers or any other lung disease that needs localized treatment.” — Eva van Rikxoort</p><p><br></p><p><strong>Links:</strong></p><p><a href="https://www.linkedin.com/in/evavanrikxoort/?originalSubdomain=nl">Eva van Rikxoort</a></p><p><a href="https://thirona.eu/">Thirona</a></p><p><strong>Resources for Computer Vision Teams:</strong></p><p><a href="https://www.linkedin.com/in/hdcouture/">LinkedIn</a> – Connect with Heather.</p><p><a href="https://pixelscientia.com/newsletter/">Computer Vision Insights Newsletter</a> – A biweekly newsletter to help bring the latest machine learning and computer vision research to applications in people and planetary health.</p><p><a href="http://pixelscientia.com/services/strategy-session/">Computer Vision Strategy Session</a> – Not sure how to advance your computer vision project? Get unstuck with a clear set of next steps. Schedule a 1 hour strategy session now to advance your project.</p><p><a href="https://pixelscientia.com/services/advisory/">Computer Vision Advisory Services</a> – Monthly advisory services to help you strategically plan your CV/ML capabilities, reduce the trial-and-error of model development, and get to market faster.</p>]]>
      </content:encoded>
      <pubDate>Mon, 10 Apr 2023 06:00:00 -0400</pubDate>
      <author>Heather D. Couture</author>
      <enclosure url="https://2.gum.fm/op3.dev/e/pdcn.co/e/pdst.fm/e/dts.podtrac.com/redirect.mp3/media.transistor.fm/f931d84a/1511878a.mp3" length="28396534" type="audio/mpeg"/>
      <itunes:author>Heather D. Couture</itunes:author>
      <itunes:image href="https://img.transistor.fm/ou9xwskiPf7WzLrXefG0BZeCQp7xB4sKwnzAbJsLiYw/rs:fill:0:0:1/w:1400/h:1400/q:60/mb:500000/aHR0cHM6Ly9pbWct/dXBsb2FkLXByb2R1/Y3Rpb24udHJhbnNp/c3Rvci5mbS9lcGlz/b2RlLzEyNDAzODIv/MTY3ODQwMTQyNi1h/cnR3b3JrLmpwZw.jpg"/>
      <itunes:duration>1179</itunes:duration>
      <itunes:summary>
        <![CDATA[<p>Today’s guest uses AI for the personalized treatment of early-stage lung cancers and other lung diseases that need localized treatment. Eva van Rikxoort, a scientist in medical image analysis and the CEO and Founder of Thirona, started studying AI 20 years ago. Her interest in lung imaging and the translation of it with the help of AI led her to found her company which develops medical image analysis software based on deep learning.</p><p>In this episode, she explains more about what Thirona does and the challenges they encounter when working with CT images. You’ll learn about the importance of online learning components for the future of AI applications for medical purposes and how the team at Thirona ensures that the technology it develops provides the right assistance to doctors, patients, and researchers. Tune in to find out more about the role of AI in the future of personalized medicine and lung disease treatments.</p><p><br></p><p><strong>Key Points:</strong></p><ul><li>An introduction to Eva van Rikxoort, her experience in AI, and how she founded Thirona.</li><li>What Thirona does and why this is important for treating disease.  </li><li>How the company uses machine learning in multiple ways for different models to predict various outputs. </li><li>The types of challenges Thirona encounters when working with CT images.</li><li>How they deal with situations where clinicians disagree or annotations are not reliable. </li><li>How the regulatory process affects the way Thirona develops machine learning models.</li><li>The importance of online learning components for the future of AI applications for medical purposes. </li><li>How Thirona ensures that the technology it develops provides the right assistance to doctors, patients, and researchers. 
</li><li>Approaches to recruiting and onboarding that have been most successful for Eva’s team.</li><li>Eva’s advice to other leaders of AI-powered startups about trusting your gut and asking for help. </li><li>How Eva foresees the impact of Thirona in the future in terms of personalized medicine and lung disease treatments.</li></ul><p><strong>Quotes:</strong></p><p>“[At Thirona] we don't make software that's aimed at radiology, even though medical image analysis is very much a radiology thing, but we really focus on breakthrough treatments that are being developed both by pharma companies but also by biotech companies.” — Eva van Rikxoort</p><p><br></p><p>“If you look technically at AI, we could be learning every single day from what we do. I mean, we as [people] do, but our AI models learn in release cycles instead of on a daily basis.” — Eva van Rikxoort</p><p><br></p><p>“Next to making something that's highly innovative, you’re also opening a market to this innovation. It's a twofold thing.” — Eva van Rikxoort</p><p><br></p><p>“Don't be afraid to ask anyone for help. For example, opinion leaders, doctors, they are very, very happy to help. 
They really love the innovations in their field.” — Eva van Rikxoort</p><p><br></p><p>“I really think the impact of Thirona will be the personalized medicine, the personalized treatment of early-stage lung cancers or any other lung disease that needs localized treatment.” — Eva van Rikxoort</p><p><br></p><p><strong>Links:</strong></p><p><a href="https://www.linkedin.com/in/evavanrikxoort/?originalSubdomain=nl">Eva van Rikxoort</a></p><p><a href="https://thirona.eu/">Thirona</a></p><p><strong>Resources for Computer Vision Teams:</strong></p><p><a href="https://www.linkedin.com/in/hdcouture/">LinkedIn</a> – Connect with Heather.</p><p><a href="https://pixelscientia.com/newsletter/">Computer Vision Insights Newsletter</a> – A biweekly newsletter to help bring the latest machine learning and computer vision research to applications in people and planetary health.</p><p><a href="http://pixelscientia.com/services/strategy-session/">Computer Vision Strategy Session</a> – Not sure how to advance your computer vision project? Get unstuck with a clear set of next steps. Schedule a 1 hour strategy session now to advance your project.</p><p><a href="https://pixelscientia.com/services/advisory/">Computer Vision Advisory Services</a> – Monthly advisory services to help you strategically plan your CV/ML capabilities, reduce the trial-and-error of model development, and get to market faster.</p>]]>
      </itunes:summary>
      <itunes:keywords>AI, startups, business, machine learning, computer vision, deep learning, impact, sustainability, healthcare, medical</itunes:keywords>
      <itunes:explicit>No</itunes:explicit>
      <podcast:transcript url="https://share.transistor.fm/s/f931d84a/transcript.txt" type="text/plain"/>
    </item>
    <item>
      <title>Eliminating Food Waste with Nathan Fenner from Afresh</title>
      <itunes:episode>26</itunes:episode>
      <podcast:episode>26</podcast:episode>
      <itunes:title>Eliminating Food Waste with Nathan Fenner from Afresh</itunes:title>
      <itunes:episodeType>full</itunes:episodeType>
      <guid isPermaLink="false">4a2c6dc7-b984-49fe-ac17-fa8e40aaa721</guid>
      <link>https://pixelscientia.com/podcast/eliminating-food-waste-with-nathan-fenner-form-afresh/</link>
      <description>
        <![CDATA[<p>The climate crisis is one of the most important and complex challenges of our age, and solving it will require collaboration, innovation, and commitment. According to Project Drawdown (a non-profit organization that functions as a top resource for climate solutions), one of the key drivers of climate change that we can meaningfully address as a society, is food waste.</p><p>In today’s episode, we learn about Afresh, a company that is leading the way in providing food waste solutions to grocers across America by creating optimized food orders through pioneering AI and machine learning solutions. You’ll hear from Afresh Co-Founder, Nathan Fenner, as we discuss the founding mission behind the company and how they are leveraging AI in a way that is fundamentally different from other established legacy companies in their field. We discuss the challenges of working with perishable products, how it results in noisy data, and why it’s so important for Afresh technology to not only provide predictions but also make decisions in the face of uncertainty.</p><p>Today’s conversation unpacks a particularly exciting area of AI and demonstrates how advancements in the field are paving the way for impactful climate solutions. 
Be sure to tune in to learn about the real-world impact of AI innovation in an area where we need it most urgently!</p><p><br></p><p><strong>Key Points:</strong></p><ul><li>Get to know today’s guest, Nathan Fenner, and how he co-founded Afresh.</li><li>Why reducing food waste is a key part of mitigating climate change.</li><li>How Afresh is helping the grocery industry optimize supply chains for perishable products.</li><li>The role that machine learning plays in Afresh’s technology.</li><li>An overview of the three main sources of data that they feed into their system.</li><li>The biggest challenges they experience with their data sources.</li><li>Understanding how past retail system solutions were built with non-perishable items in mind.</li><li>Why perishable items result in extremely noisy data.</li><li>The challenges that noisy data poses to machine learning models.</li><li>How Afresh is addressing the challenges inherent to noisy data.</li><li>What differentiates Afresh from other established legacy companies in their field.</li><li>How Afresh is leveraging AI to make decisions, rather than simply providing a forecast.</li><li>How Afresh measures the impact of their technology on profits, food waste, and the planet.</li><li>Unpacking the difficulty in finding, hiring, and attracting machine learning specialists.</li><li>The confluence of factors that are helping Afresh attract top talent.</li><li>What Nathan is most excited about for the future of Afresh.</li></ul><p><strong>Quotes:</strong></p><p>“We're hyper-focused on building supply chain software to optimize all the perishable supply chains in retail. The big outcome of optimizing that supply chain is that we dramatically reduce food waste. 
Food waste is one of the biggest macroscopic contributors to climate change.” — Nathan Fenner</p><p><br></p><p>“Good machine learning is key to writing an optimal order that maximizes profit, but also minimizes waste.” — Nathan Fenner</p><p><br></p><p>“All the technology that had been built for the grocery industry, and that was being used in supply chain and inventory management, had all been built for the non-fresh side of the business. It had all been built for things that come in boxes that have barcodes.” — Nathan Fenner</p><p><br></p><p>“We leverage AI in a fundamentally different way. We definitely do forecasting, but the critical thing we're doing is really decision-making under uncertainty. The output from our models is actually a decision as opposed to simply a forecast.” — Nathan Fenner</p><p><br></p><p>“Leveraging this more frontier area of machine learning has allowed us to make really good decisions in a really uncertain environment.” — Nathan Fenner</p><p><br></p><p>“If we can build a technology that reduces food waste by 50%, it will become uneconomic for grocers to not use our technology (or a similar technology) that produces that much in cost savings.” — Nathan Fenner</p><p><br></p><p><strong>Links:</strong></p><p><a href="https://www.linkedin.com/in/nathan-fenner/">Nathan Fenner on LinkedIn</a></p><p><a href="https://www.afresh.com/">Afresh</a></p><p><strong>Resources for Computer Vision Teams:</strong></p><p><a href="https://www.linkedin.com/in/hdcouture/">LinkedIn</a> – Connect with Heather.</p><p><a href="https://pixelscientia.com/newsletter/">Computer Vision Insights Newsletter</a> – A biweekly newsletter to help bring the latest machine learning and computer vision research to applications in people and planetary health.</p><p><a href="http://pixelscientia.com/services/strategy-session/">Computer Vision Strategy Session</a> – Not sure how to advance your computer vision project? Get unstuck with a clear set of next steps. 
Schedule a 1 hour strategy session now to advance your project.</p><p><a href="https://pixelscientia.com/services/advisory/">Computer Vision Advisory Services</a> – Monthly advisory services to help you strategically plan your CV/ML capabilities, reduce the trial-and-error of model development, and get to market faster.</p>]]>
      </description>
      <content:encoded>
        <![CDATA[<p>The climate crisis is one of the most important and complex challenges of our age, and solving it will require collaboration, innovation, and commitment. According to Project Drawdown (a non-profit organization that functions as a top resource for climate solutions), one of the key drivers of climate change that we can meaningfully address as a society, is food waste.</p><p>In today’s episode, we learn about Afresh, a company that is leading the way in providing food waste solutions to grocers across America by creating optimized food orders through pioneering AI and machine learning solutions. You’ll hear from Afresh Co-Founder, Nathan Fenner, as we discuss the founding mission behind the company and how they are leveraging AI in a way that is fundamentally different from other established legacy companies in their field. We discuss the challenges of working with perishable products, how it results in noisy data, and why it’s so important for Afresh technology to not only provide predictions but also make decisions in the face of uncertainty.</p><p>Today’s conversation unpacks a particularly exciting area of AI and demonstrates how advancements in the field are paving the way for impactful climate solutions. 
Be sure to tune in to learn about the real-world impact of AI innovation in an area where we need it most urgently!</p><p><br></p><p><strong>Key Points:</strong></p><ul><li>Get to know today’s guest, Nathan Fenner, and how he co-founded Afresh.</li><li>Why reducing food waste is a key part of mitigating climate change.</li><li>How Afresh is helping the grocery industry optimize supply chains for perishable products.</li><li>The role that machine learning plays in Afresh’s technology.</li><li>An overview of the three main sources of data that they feed into their system.</li><li>The biggest challenges they experience with their data sources.</li><li>Understanding how past retail system solutions were built with non-perishable items in mind.</li><li>Why perishable items result in extremely noisy data.</li><li>The challenges that noisy data poses to machine learning models.</li><li>How Afresh is addressing the challenges inherent to noisy data.</li><li>What differentiates Afresh from other established legacy companies in their field.</li><li>How Afresh is leveraging AI to make decisions, rather than simply providing a forecast.</li><li>How Afresh measures the impact of their technology on profits, food waste, and the planet.</li><li>Unpacking the difficulty in finding, hiring, and attracting machine learning specialists.</li><li>The confluence of factors that are helping Afresh attract top talent.</li><li>What Nathan is most excited about for the future of Afresh.</li></ul><p><strong>Quotes:</strong></p><p>“We're hyper-focused on building supply chain software to optimize all the perishable supply chains in retail. The big outcome of optimizing that supply chain is that we dramatically reduce food waste. 
Food waste is one of the biggest macroscopic contributors to climate change.” — Nathan Fenner</p><p><br></p><p>“Good machine learning is key to writing an optimal order that maximizes profit, but also minimizes waste.” — Nathan Fenner</p><p><br></p><p>“All the technology that had been built for the grocery industry, and that was being used in supply chain and inventory management, had all been built for the non-fresh side of the business. It had all been built for things that come in boxes that have barcodes.” — Nathan Fenner</p><p><br></p><p>“We leverage AI in a fundamentally different way. We definitely do forecasting, but the critical thing we're doing is really decision-making under uncertainty. The output from our models is actually a decision as opposed to simply a forecast.” — Nathan Fenner</p><p><br></p><p>“Leveraging this more frontier area of machine learning has allowed us to make really good decisions in a really uncertain environment.” — Nathan Fenner</p><p><br></p><p>“If we can build a technology that reduces food waste by 50%, it will become uneconomic for grocers to not use our technology (or a similar technology) that produces that much in cost savings.” — Nathan Fenner</p><p><br></p><p><strong>Links:</strong></p><p><a href="https://www.linkedin.com/in/nathan-fenner/">Nathan Fenner on LinkedIn</a></p><p><a href="https://www.afresh.com/">Afresh</a></p><p><strong>Resources for Computer Vision Teams:</strong></p><p><a href="https://www.linkedin.com/in/hdcouture/">LinkedIn</a> – Connect with Heather.</p><p><a href="https://pixelscientia.com/newsletter/">Computer Vision Insights Newsletter</a> – A biweekly newsletter to help bring the latest machine learning and computer vision research to applications in people and planetary health.</p><p><a href="http://pixelscientia.com/services/strategy-session/">Computer Vision Strategy Session</a> – Not sure how to advance your computer vision project? Get unstuck with a clear set of next steps. 
Schedule a 1 hour strategy session now to advance your project.</p><p><a href="https://pixelscientia.com/services/advisory/">Computer Vision Advisory Services</a> – Monthly advisory services to help you strategically plan your CV/ML capabilities, reduce the trial-and-error of model development, and get to market faster.</p>]]>
      </content:encoded>
      <pubDate>Mon, 03 Apr 2023 06:00:00 -0400</pubDate>
      <author>Heather D. Couture</author>
      <enclosure url="https://2.gum.fm/op3.dev/e/pdcn.co/e/pdst.fm/e/dts.podtrac.com/redirect.mp3/media.transistor.fm/bbbfce51/0c90136a.mp3" length="37233305" type="audio/mpeg"/>
      <itunes:author>Heather D. Couture</itunes:author>
      <itunes:image href="https://img.transistor.fm/r3sNMdMWA49GaibRDhdbmP6VLr_OmZR-Q42BJyT-oo8/rs:fill:0:0:1/w:1400/h:1400/q:60/mb:500000/aHR0cHM6Ly9pbWct/dXBsb2FkLXByb2R1/Y3Rpb24udHJhbnNp/c3Rvci5mbS9lcGlz/b2RlLzEyNDAzNzgv/MTY3ODQwMTI3Mi1h/cnR3b3JrLmpwZw.jpg"/>
      <itunes:duration>1545</itunes:duration>
      <itunes:summary>
        <![CDATA[<p>The climate crisis is one of the most important and complex challenges of our age, and solving it will require collaboration, innovation, and commitment. According to Project Drawdown (a non-profit organization that functions as a top resource for climate solutions), one of the key drivers of climate change that we can meaningfully address as a society, is food waste.</p><p>In today’s episode, we learn about Afresh, a company that is leading the way in providing food waste solutions to grocers across America by creating optimized food orders through pioneering AI and machine learning solutions. You’ll hear from Afresh Co-Founder, Nathan Fenner, as we discuss the founding mission behind the company and how they are leveraging AI in a way that is fundamentally different from other established legacy companies in their field. We discuss the challenges of working with perishable products, how it results in noisy data, and why it’s so important for Afresh technology to not only provide predictions but also make decisions in the face of uncertainty.</p><p>Today’s conversation unpacks a particularly exciting area of AI and demonstrates how advancements in the field are paving the way for impactful climate solutions. 
Be sure to tune in to learn about the real-world impact of AI innovation in an area where we need it most urgently!</p><p><br></p><p><strong>Key Points:</strong></p><ul><li>Get to know today’s guest, Nathan Fenner, and how he co-founded Afresh.</li><li>Why reducing food waste is a key part of mitigating climate change.</li><li>How Afresh is helping the grocery industry optimize supply chains for perishable products.</li><li>The role that machine learning plays in Afresh’s technology.</li><li>An overview of the three main sources of data that they feed into their system.</li><li>The biggest challenges they experience with their data sources.</li><li>Understanding how past retail system solutions were built with non-perishable items in mind.</li><li>Why perishable items result in extremely noisy data.</li><li>The challenges that noisy data poses to machine learning models.</li><li>How Afresh is addressing the challenges inherent to noisy data.</li><li>What differentiates Afresh from other established legacy companies in their field.</li><li>How Afresh is leveraging AI to make decisions, rather than simply providing a forecast.</li><li>How Afresh measures the impact of their technology on profits, food waste, and the planet.</li><li>Unpacking the difficulty in finding, hiring, and attracting machine learning specialists.</li><li>The confluence of factors that are helping Afresh attract top talent.</li><li>What Nathan is most excited about for the future of Afresh.</li></ul><p><strong>Quotes:</strong></p><p>“We're hyper-focused on building supply chain software to optimize all the perishable supply chains in retail. The big outcome of optimizing that supply chain is that we dramatically reduce food waste. 
Food waste is one of the biggest macroscopic contributors to climate change.” — Nathan Fenner</p><p><br></p><p>“Good machine learning is key to writing an optimal order that maximizes profit, but also minimizes waste.” — Nathan Fenner</p><p><br></p><p>“All the technology that had been built for the grocery industry, and that was being used in supply chain and inventory management, had all been built for the non-fresh side of the business. It had all been built for things that come in boxes that have barcodes.” — Nathan Fenner</p><p><br></p><p>“We leverage AI in a fundamentally different way. We definitely do forecasting, but the critical thing we're doing is really decision-making under uncertainty. The output from our models is actually a decision as opposed to simply a forecast.” — Nathan Fenner</p><p><br></p><p>“Leveraging this more frontier area of machine learning has allowed us to make really good decisions in a really uncertain environment.” — Nathan Fenner</p><p><br></p><p>“If we can build a technology that reduces food waste by 50%, it will become uneconomic for grocers to not use our technology (or a similar technology) that produces that much in cost savings.” — Nathan Fenner</p><p><br></p><p><strong>Links:</strong></p><p><a href="https://www.linkedin.com/in/nathan-fenner/">Nathan Fenner on LinkedIn</a></p><p><a href="https://www.afresh.com/">Afresh</a></p><p><strong>Resources for Computer Vision Teams:</strong></p><p><a href="https://www.linkedin.com/in/hdcouture/">LinkedIn</a> – Connect with Heather.</p><p><a href="https://pixelscientia.com/newsletter/">Computer Vision Insights Newsletter</a> – A biweekly newsletter to help bring the latest machine learning and computer vision research to applications in people and planetary health.</p><p><a href="http://pixelscientia.com/services/strategy-session/">Computer Vision Strategy Session</a> – Not sure how to advance your computer vision project? Get unstuck with a clear set of next steps. 
Schedule a 1 hour strategy session now to advance your project.</p><p><a href="https://pixelscientia.com/services/advisory/">Computer Vision Advisory Services</a> – Monthly advisory services to help you strategically plan your CV/ML capabilities, reduce the trial-and-error of model development, and get to market faster.</p>]]>
      </itunes:summary>
      <itunes:keywords>AI, startups, business, machine learning, computer vision, deep learning, impact, sustainability, healthcare, medical</itunes:keywords>
      <itunes:explicit>No</itunes:explicit>
      <podcast:transcript url="https://share.transistor.fm/s/bbbfce51/transcript.txt" type="text/plain"/>
    </item>
    <item>
      <title>Visualizing Cancer Margins with Ersin Bayram from Perimeter Medical Imaging AI</title>
      <itunes:episode>25</itunes:episode>
      <podcast:episode>25</podcast:episode>
      <itunes:title>Visualizing Cancer Margins with Ersin Bayram from Perimeter Medical Imaging AI</itunes:title>
      <itunes:episodeType>full</itunes:episodeType>
      <guid isPermaLink="false">ab097647-b121-4b38-a7cb-b999ce02716e</guid>
      <link>https://pixelscientia.com/podcast/visualizing-cancer-margins-with-ersin-bayram-from-perimeter-medical-imaging-ai/</link>
      <description>
        <![CDATA[<p>Today, I am joined by Ersin Bayram, the director of AI and data science at Perimeter Medical Imaging AI, to talk about tissue imaging during cancer surgery. This technology provides real-time margin visualization to surgeons intraoperatively vs. waiting days later for the pathology report, which remains the gold standard for confirming margin status. The surgical oncologists’ goal is to achieve clean margins on excised tissue during the initial surgery and reduce the chance of the patient requiring a second surgery or leaving some cancerous tissue behind. The next generation of this device uses AI and big data to speed image interpretation.</p><p>Tuning in, you’ll hear about the role of machine learning in this technology, how they gather and annotate data in order to train the system, and the types of challenges encountered when working with OCT imagery. We discuss the role of model explainability, whether or not model accuracy is more critical, and how classic activation maps are used for improving the model. We also talk about regulatory processes as well as Ersin's approach to recruiting and onboarding before he gives his advice to other leaders of AI-powered startups. For all this and more, tune in today!</p><p><br></p><p><strong>Key Points:</strong></p><ul><li>An introduction to Ersin Bayram and his role at Perimeter Medical Imaging AI. </li><li>What Perimeter does and why this is important for cancer outcomes. </li><li>The role of machine learning in this technology. </li><li>How this technology segments out potential areas of concern on the OCT scans and displays this to the surgeon.  </li><li>How Perimeter Medical Imaging AI gathers and annotates data in order to train the system.</li><li>The types of challenges they encounter when working with OCT imagery. </li><li>The role of model explainability and whether or not model accuracy is more critical. 
</li><li>How classic activation maps are used for improving the model and not shown to the clinician.  </li><li>How the regulatory process affects the way Ersin and his team develop machine learning models. </li><li>Approaches to recruiting and onboarding that have been most successful. </li><li>Ersin’s advice to other leaders of AI-powered startups. </li><li>How he foresees the impact of Perimeter three to five years from now.</li></ul><p><strong>Quotes:</strong></p><p>“I can still work on oncology, making an impact on a really deadly disease, and also start focusing entirely on the AI side and the data science aspect. That was an easy decision.” — Ersin Bayram</p><p><br></p><p>“The surgeon might be able to look into the images and then they might be able to go back and take extra shaves or there might be also benefits not to carve out healthy tissue more than needed.” — Ersin Bayram</p><p><br></p><p>“If you find the talent that has the medical imaging background and they have the foundational skills, technical thinking, and they have basic Python skills, we can train them and we can ramp them up to become good AI scientists.” — Ersin Bayram</p><p><br></p><p><strong>Links:</strong></p><p><a href="https://www.linkedin.com/in/ersin-bayram-phd-1a8b51/">Ersin Bayram</a></p><p><a href="https://perimetermed.com/">Perimeter Medical Imaging AI</a></p><p><strong>Disclaimer:</strong><br>Perimeter B-Series OCT is not available for sale in the United States. CAUTION – Investigational device. Limited by U.S. 
law to investigational use.</p><p><strong>Resources for Computer Vision Teams:</strong></p><p><a href="https://www.linkedin.com/in/hdcouture/">LinkedIn</a> – Connect with Heather.</p><p><a href="https://pixelscientia.com/newsletter/">Computer Vision Insights Newsletter</a> – A biweekly newsletter to help bring the latest machine learning and computer vision research to applications in people and planetary health.</p><p><a href="http://pixelscientia.com/services/strategy-session/">Computer Vision Strategy Session</a> – Not sure how to advance your computer vision project? Get unstuck with a clear set of next steps. Schedule a 1 hour strategy session now to advance your project.</p><p><a href="https://pixelscientia.com/services/advisory/">Computer Vision Advisory Services</a> – Monthly advisory services to help you strategically plan your CV/ML capabilities, reduce the trial-and-error of model development, and get to market faster.</p>]]>
      </description>
      <content:encoded>
        <![CDATA[<p>Today, I am joined by Ersin Bayram, the director of AI and data science at Perimeter Medical Imaging AI, to talk about tissue imaging during cancer surgery. This technology provides real-time margin visualization to surgeons intraoperatively vs. waiting days later for the pathology report, which remains the gold standard for confirming margin status. The surgical oncologists’ goal is to achieve clean margins on excised tissue during the initial surgery and reduce the chance of the patient requiring a second surgery or leaving some cancerous tissue behind. The next generation of this device uses AI and big data to speed image interpretation.</p><p>Tuning in, you’ll hear about the role of machine learning in this technology, how they gather and annotate data in order to train the system, and the types of challenges encountered when working with OCT imagery. We discuss the role of model explainability, whether or not model accuracy is more critical, and how classic activation maps are used for improving the model. We also talk about regulatory processes as well as Ersin's approach to recruiting and onboarding before he gives his advice to other leaders of AI-powered startups. For all this and more, tune in today!</p><p><br></p><p><strong>Key Points:</strong></p><ul><li>An introduction to Ersin Bayram and his role at Perimeter Medical Imaging AI. </li><li>What Perimeter does and why this is important for cancer outcomes. </li><li>The role of machine learning in this technology. </li><li>How this technology segments out potential areas of concern on the OCT scans and displays this to the surgeon.  </li><li>How Perimeter Medical Imaging AI gathers and annotates data in order to train the system.</li><li>The types of challenges they encounter when working with OCT imagery. </li><li>The role of model explainability and whether or not model accuracy is more critical. 
</li><li>How classic activation maps are used for improving the model and not shown to the clinician.  </li><li>How the regulatory process affects the way Ersin and his team develop machine learning models. </li><li>Approaches to recruiting and onboarding that have been most successful. </li><li>Ersin’s advice to other leaders of AI-powered startups. </li><li>How he foresees the impact of Perimeter three to five years from now.</li></ul><p><strong>Quotes:</strong></p><p>“I can still work on oncology, making an impact on a really deadly disease, and also start focusing entirely on the AI side and the data science aspect. That was an easy decision.” — Ersin Bayram</p><p><br></p><p>“The surgeon might be able to look into the images and then they might be able to go back and take extra shaves or there might be also benefits not to carve out healthy tissue more than needed.” — Ersin Bayram</p><p><br></p><p>“If you find the talent that has the medical imaging background and they have the foundational skills, technical thinking, and they have basic Python skills, we can train them and we can ramp them up to become good AI scientists.” — Ersin Bayram</p><p><br></p><p><strong>Links:</strong></p><p><a href="https://www.linkedin.com/in/ersin-bayram-phd-1a8b51/">Ersin Bayram</a></p><p><a href="https://perimetermed.com/">Perimeter Medical Imaging AI</a></p><p><strong>Disclaimer:</strong><br>Perimeter B-Series OCT is not available for sale in the United States. CAUTION – Investigational device. Limited by U.S. 
law to investigational use.</p><p><strong>Resources for Computer Vision Teams:</strong></p><p><a href="https://www.linkedin.com/in/hdcouture/">LinkedIn</a> – Connect with Heather.</p><p><a href="https://pixelscientia.com/newsletter/">Computer Vision Insights Newsletter</a> – A biweekly newsletter to help bring the latest machine learning and computer vision research to applications in people and planetary health.</p><p><a href="http://pixelscientia.com/services/strategy-session/">Computer Vision Strategy Session</a> – Not sure how to advance your computer vision project? Get unstuck with a clear set of next steps. Schedule a 1 hour strategy session now to advance your project.</p><p><a href="https://pixelscientia.com/services/advisory/">Computer Vision Advisory Services</a> – Monthly advisory services to help you strategically plan your CV/ML capabilities, reduce the trial-and-error of model development, and get to market faster.</p>]]>
      </content:encoded>
      <pubDate>Mon, 27 Mar 2023 06:00:00 -0400</pubDate>
      <author>Heather D. Couture</author>
      <enclosure url="https://2.gum.fm/op3.dev/e/pdcn.co/e/pdst.fm/e/dts.podtrac.com/redirect.mp3/media.transistor.fm/96d81a33/0c68f629.mp3" length="15672953" type="audio/mpeg"/>
      <itunes:author>Heather D. Couture</itunes:author>
      <itunes:image href="https://img.transistor.fm/j5lwJ7ULOa1IPNDPyLFJj6-nu7WVgZZ1lzGVu1n_1SM/rs:fill:0:0:1/w:1400/h:1400/q:60/mb:500000/aHR0cHM6Ly9pbWct/dXBsb2FkLXByb2R1/Y3Rpb24udHJhbnNp/c3Rvci5mbS9lcGlz/b2RlLzEyNDAzNzUv/MTY3ODQwMTExOC1h/cnR3b3JrLmpwZw.jpg"/>
      <itunes:duration>976</itunes:duration>
      <itunes:summary>
        <![CDATA[<p>Today, I am joined by Ersin Bayram, the director of AI and data science at Perimeter Medical Imaging AI, to talk about tissue imaging during cancer surgery. This technology provides real-time margin visualization to surgeons intraoperatively vs. waiting days later for the pathology report, which remains the gold standard for confirming margin status. The surgical oncologists’ goal is to achieve clean margins on excised tissue during the initial surgery and reduce the chance of the patient requiring a second surgery or leaving some cancerous tissue behind. The next generation of this device uses AI and big data to speed image interpretation.</p><p>Tuning in, you’ll hear about the role of machine learning in this technology, how they gather and annotate data in order to train the system, and the types of challenges encountered when working with OCT imagery. We discuss the role of model explainability, whether or not model accuracy is more critical, and how classic activation maps are used for improving the model. We also talk about regulatory processes as well as Ersin's approach to recruiting and onboarding before he gives his advice to other leaders of AI-powered startups. For all this and more, tune in today!</p><p><br></p><p><strong>Key Points:</strong></p><ul><li>An introduction to Ersin Bayram and his role at Perimeter Medical Imaging AI. </li><li>What Perimeter does and why this is important for cancer outcomes. </li><li>The role of machine learning in this technology. </li><li>How this technology segments out potential areas of concern on the OCT scans and displays this to the surgeon.  </li><li>How Perimeter Medical Imaging AI gathers and annotates data in order to train the system.</li><li>The types of challenges they encounter when working with OCT imagery. </li><li>The role of model explainability and whether or not model accuracy is more critical. 
</li><li>How classic activation maps are used for improving the model and not shown to the clinician.  </li><li>How the regulatory process affects the way Ersin and his team develop machine learning models. </li><li>Approaches to recruiting and onboarding that have been most successful. </li><li>Ersin’s advice to other leaders of AI-powered startups. </li><li>How he foresees the impact of Perimeter three to five years from now.</li></ul><p><strong>Quotes:</strong></p><p>“I can still work on oncology, making an impact on a really deadly disease, and also start focusing entirely on the AI side and the data science aspect. That was an easy decision.” — Ersin Bayram</p><p><br></p><p>“The surgeon might be able to look into the images and then they might be able to go back and take extra shaves or there might be also benefits not to carve out healthy tissue more than needed.” — Ersin Bayram</p><p><br></p><p>“If you find the talent that has the medical imaging background and they have the foundational skills, technical thinking, and they have basic Python skills, we can train them and we can ramp them up to become good AI scientists.” — Ersin Bayram</p><p><br></p><p><strong>Links:</strong></p><p><a href="https://www.linkedin.com/in/ersin-bayram-phd-1a8b51/">Ersin Bayram</a></p><p><a href="https://perimetermed.com/">Perimeter Medical Imaging AI</a></p><p><strong>Disclaimer:</strong><br>Perimeter B-Series OCT is not available for sale in the United States. CAUTION – Investigational device. Limited by U.S. 
law to investigational use.</p><p><strong>Resources for Computer Vision Teams:</strong></p><p><a href="https://www.linkedin.com/in/hdcouture/">LinkedIn</a> – Connect with Heather.</p><p><a href="https://pixelscientia.com/newsletter/">Computer Vision Insights Newsletter</a> – A biweekly newsletter to help bring the latest machine learning and computer vision research to applications in people and planetary health.</p><p><a href="http://pixelscientia.com/services/strategy-session/">Computer Vision Strategy Session</a> – Not sure how to advance your computer vision project? Get unstuck with a clear set of next steps. Schedule a 1 hour strategy session now to advance your project.</p><p><a href="https://pixelscientia.com/services/advisory/">Computer Vision Advisory Services</a> – Monthly advisory services to help you strategically plan your CV/ML capabilities, reduce the trial-and-error of model development, and get to market faster.</p>]]>
      </itunes:summary>
      <itunes:keywords>AI, startups, business, machine learning, computer vision, deep learning, impact, sustainability, healthcare, medical</itunes:keywords>
      <itunes:explicit>No</itunes:explicit>
      <podcast:transcript url="https://share.transistor.fm/s/96d81a33/transcript.txt" type="text/plain"/>
    </item>
    <item>
      <title>Transitioning to a Zero Carbon Economy with Matt Gray from TransitionZero</title>
      <itunes:episode>24</itunes:episode>
      <podcast:episode>24</podcast:episode>
      <itunes:title>Transitioning to a Zero Carbon Economy with Matt Gray from TransitionZero</itunes:title>
      <itunes:episodeType>full</itunes:episodeType>
      <guid isPermaLink="false">dbcd16ca-a145-4c48-9918-b79f6794bc39</guid>
      <link>https://pixelscientia.com/podcast/transitioning-to-a-zero-carbon-economy-with-matt-gray-from-transitionzero/</link>
      <description>
        <![CDATA[<p>Matt Gray is the co-founder and CEO of TransitionZero, a not-for-profit analyzing financial data to manage the decline of fossil fuels and support the shift to zero carbon growth opportunities. During today’s conversation, we talk about how TransitionZero leverages the power of machine learning to create a positive impact on the world. We touch on the mechanics before acknowledging the indispensable role of domain expertise in creating success. We talk about measuring impact in a not-for-profit context, and zoom in on TransitionZero’s mission and projections for the future. You’ll hear some examples of the incredible change that has taken place as a result of TransitionZero's work, what some of their challenges have looked like, and the exciting inner workings taking place at TransitionZero today. Thanks for tuning in! </p><p><br></p><p><strong>Key Points:</strong></p><ul><li>An introduction to Matt Gray, co-founder and CEO of TransitionZero. </li><li>What TransitionZero does and why it is important for reducing emissions.</li><li>The role of machine learning in data analysis.</li><li>TransitionZero and the Climate Trace organization. </li><li>How TransitionZero validates models.</li><li>Why domain expertise is indispensable.</li><li>How the approach differs between different facilities.</li><li>Measuring impact in accordance with TransitionZero’s mission.</li><li>Examples of the impact the organization has had in China and Japan.</li><li>The challenge of finding the right people to join the team. </li><li>Policies that enhance non-salary benefits to the team.</li><li>His advice not to lead with AI.</li><li>How TransitionZero is approaching the Future Energy Outlook Project. 
</li><li>His hope for the future of TransitionZero’s impact.</li></ul><p><strong>Quotes:</strong></p><p>“TransitionZero is a climate data analytics not-for-profit, co-founded in 2020.” — Matt Gray</p><p><br>“Another application we are just embarking on is using data science to estimate the productivity of wind and solar assets.” — Matt Gray</p><p><br></p><p>“One thing we have learned over the last two to three years is that domain expertise is indispensable when you’re building models.” — Matt Gray</p><p><br></p><p>“Our mission is for affordable and dependable energy for everyone.” — Matt Gray</p><p><br></p><p>“Don’t lead with AI. Lead with the use case and the problem that you are solving.” — Matt Gray</p><p><br></p><p><strong>Links:</strong></p><p><br></p><p><a href="https://twitter.com/matthewcgray">Matt Gray on Twitter<strong><br></strong></a><a href="https://www.linkedin.com/in/mattcgray/">Matt Gray on LinkedIn</a></p><p><a href="https://www.transitionzero.org/">TransitionZero</a></p><p><a href="https://twitter.com/TransitionZero">TransitionZero on Twitter</a></p><p><a href="https://www.linkedin.com/company/transitionzero/">TransitionZero on LinkedIn</a></p><p><a href="https://github.com/transition-zero">TransitionZero on Github</a></p><p><a href="https://climatetrace.org/">Climate Trace</a></p><p><strong>Resources for Computer Vision Teams:</strong></p><p><a href="https://www.linkedin.com/in/hdcouture/">LinkedIn</a> – Connect with Heather.</p><p><a href="https://pixelscientia.com/newsletter/">Computer Vision Insights Newsletter</a> – A biweekly newsletter to help bring the latest machine learning and computer vision research to applications in people and planetary health.</p><p><a href="http://pixelscientia.com/services/strategy-session/">Computer Vision Strategy Session</a> – Not sure how to advance your computer vision project? Get unstuck with a clear set of next steps. 
Schedule a 1 hour strategy session now to advance your project.</p><p><a href="https://pixelscientia.com/services/advisory/">Computer Vision Advisory Services</a> – Monthly advisory services to help you strategically plan your CV/ML capabilities, reduce the trial-and-error of model development, and get to market faster.</p>]]>
      </description>
      <content:encoded>
        <![CDATA[<p>Matt Gray is the co-founder and CEO of TransitionZero, a not-for-profit analyzing financial data to manage the decline of fossil fuels and support the shift to zero carbon growth opportunities. During today’s conversation, we talk about how TransitionZero leverages the power of machine learning to create a positive impact on the world. We touch on the mechanics before acknowledging the indispensable role of domain expertise in creating success. We talk about measuring impact in a not-for-profit context, and zoom in on TransitionZero’s mission and projections for the future. You’ll hear some examples of the incredible change that has taken place as a result of TransitionZero's work, what some of their challenges have looked like, and the exciting inner workings taking place at TransitionZero today. Thanks for tuning in! </p><p><br></p><p><strong>Key Points:</strong></p><ul><li>An introduction to Matt Gray, co-founder and CEO of TransitionZero. </li><li>What TransitionZero does and why it is important for reducing emissions.</li><li>The role of machine learning in data analysis.</li><li>TransitionZero and the Climate Trace organization. </li><li>How TransitionZero validates models.</li><li>Why domain expertise is indispensable.</li><li>How the approach differs between different facilities.</li><li>Measuring impact in accordance with TransitionZero’s mission.</li><li>Examples of the impact the organization has had in China and Japan.</li><li>The challenge of finding the right people to join the team. </li><li>Policies that enhance non-salary benefits to the team.</li><li>His advice not to lead with AI.</li><li>How TransitionZero is approaching the Future Energy Outlook Project. 
</li><li>His hope for the future of TransitionZero’s impact.</li></ul><p><strong>Quotes:</strong></p><p>“TransitionZero is a climate data analytics not-for-profit, co-founded in 2020.” — Matt Gray</p><p><br>“Another application we are just embarking on is using data science to estimate the productivity of wind and solar assets.” — Matt Gray</p><p><br></p><p>“One thing we have learned over the last two to three years is that domain expertise is indispensable when you’re building models.” — Matt Gray</p><p><br></p><p>“Our mission is for affordable and dependable energy for everyone.” — Matt Gray</p><p><br></p><p>“Don’t lead with AI. Lead with the use case and the problem that you are solving.” — Matt Gray</p><p><br></p><p><strong>Links:</strong></p><p><br></p><p><a href="https://twitter.com/matthewcgray">Matt Gray on Twitter<strong><br></strong></a><a href="https://www.linkedin.com/in/mattcgray/">Matt Gray on LinkedIn</a></p><p><a href="https://www.transitionzero.org/">TransitionZero</a></p><p><a href="https://twitter.com/TransitionZero">TransitionZero on Twitter</a></p><p><a href="https://www.linkedin.com/company/transitionzero/">TransitionZero on LinkedIn</a></p><p><a href="https://github.com/transition-zero">TransitionZero on Github</a></p><p><a href="https://climatetrace.org/">Climate Trace</a></p><p><strong>Resources for Computer Vision Teams:</strong></p><p><a href="https://www.linkedin.com/in/hdcouture/">LinkedIn</a> – Connect with Heather.</p><p><a href="https://pixelscientia.com/newsletter/">Computer Vision Insights Newsletter</a> – A biweekly newsletter to help bring the latest machine learning and computer vision research to applications in people and planetary health.</p><p><a href="http://pixelscientia.com/services/strategy-session/">Computer Vision Strategy Session</a> – Not sure how to advance your computer vision project? Get unstuck with a clear set of next steps. 
Schedule a 1 hour strategy session now to advance your project.</p><p><a href="https://pixelscientia.com/services/advisory/">Computer Vision Advisory Services</a> – Monthly advisory services to help you strategically plan your CV/ML capabilities, reduce the trial-and-error of model development, and get to market faster.</p>]]>
      </content:encoded>
      <pubDate>Mon, 20 Mar 2023 06:00:00 -0400</pubDate>
      <author>Heather D. Couture</author>
      <enclosure url="https://2.gum.fm/op3.dev/e/pdcn.co/e/pdst.fm/e/dts.podtrac.com/redirect.mp3/media.transistor.fm/ad66c3a9/ed2c89d2.mp3" length="37144234" type="audio/mpeg"/>
      <itunes:author>Heather D. Couture</itunes:author>
      <itunes:image href="https://img.transistor.fm/CvNPPRfRJz4MTmZ1gY6KH6ZH5rSNkHJ7wZ_mRS2W5nc/rs:fill:0:0:1/w:1400/h:1400/q:60/mb:500000/aHR0cHM6Ly9pbWct/dXBsb2FkLXByb2R1/Y3Rpb24udHJhbnNp/c3Rvci5mbS9lcGlz/b2RlLzEyNDAzNzIv/MTY3ODQwMDg5OS1h/cnR3b3JrLmpwZw.jpg"/>
      <itunes:duration>1542</itunes:duration>
      <itunes:summary>
        <![CDATA[<p>Matt Gray is the co-founder and CEO of TransitionZero, a not-for-profit analyzing financial data to manage the decline of fossil fuels and support the shift to zero carbon growth opportunities. During today’s conversation, we talk about how TransitionZero leverages the power of machine learning to create a positive impact on the world. We touch on the mechanics before acknowledging the indispensable role of domain expertise in creating success. We talk about measuring impact in a not-for-profit context, and zoom in on TransitionZero’s mission and projections for the future. You’ll hear some examples of the incredible change that has taken place as a result of TransitionZero's work, what some of their challenges have looked like, and the exciting inner workings taking place at TransitionZero today. Thanks for tuning in! </p><p><br></p><p><strong>Key Points:</strong></p><ul><li>An introduction to Matt Gray, co-founder and CEO of TransitionZero. </li><li>What TransitionZero does and why it is important for reducing emissions.</li><li>The role of machine learning in data analysis.</li><li>TransitionZero and the Climate Trace organization. </li><li>How TransitionZero validates models.</li><li>Why domain expertise is indispensable.</li><li>How the approach differs between different facilities.</li><li>Measuring impact in accordance with TransitionZero’s mission.</li><li>Examples of the impact the organization has had in China and Japan.</li><li>The challenge of finding the right people to join the team. </li><li>Policies that enhance non-salary benefits to the team.</li><li>His advice not to lead with AI.</li><li>How TransitionZero is approaching the Future Energy Outlook Project. 
</li><li>His hope for the future of TransitionZero’s impact.</li></ul><p><strong>Quotes:</strong></p><p>“TransitionZero is a climate data analytics not-for-profit, co-founded in 2020.” — Matt Gray</p><p><br>“Another application we are just embarking on is using data science to estimate the productivity of wind and solar assets.” — Matt Gray</p><p><br></p><p>“One thing we have learned over the last two to three years is that domain expertise is indispensable when you’re building models.” — Matt Gray</p><p><br></p><p>“Our mission is for affordable and dependable energy for everyone.” — Matt Gray</p><p><br></p><p>“Don’t lead with AI. Lead with the use case and the problem that you are solving.” — Matt Gray</p><p><br></p><p><strong>Links:</strong></p><p><br></p><p><a href="https://twitter.com/matthewcgray">Matt Gray on Twitter<strong><br></strong></a><a href="https://www.linkedin.com/in/mattcgray/">Matt Gray on LinkedIn</a></p><p><a href="https://www.transitionzero.org/">TransitionZero</a></p><p><a href="https://twitter.com/TransitionZero">TransitionZero on Twitter</a></p><p><a href="https://www.linkedin.com/company/transitionzero/">TransitionZero on LinkedIn</a></p><p><a href="https://github.com/transition-zero">TransitionZero on Github</a></p><p><a href="https://climatetrace.org/">Climate Trace</a></p><p><strong>Resources for Computer Vision Teams:</strong></p><p><a href="https://www.linkedin.com/in/hdcouture/">LinkedIn</a> – Connect with Heather.</p><p><a href="https://pixelscientia.com/newsletter/">Computer Vision Insights Newsletter</a> – A biweekly newsletter to help bring the latest machine learning and computer vision research to applications in people and planetary health.</p><p><a href="http://pixelscientia.com/services/strategy-session/">Computer Vision Strategy Session</a> – Not sure how to advance your computer vision project? Get unstuck with a clear set of next steps. 
Schedule a 1 hour strategy session now to advance your project.</p><p><a href="https://pixelscientia.com/services/advisory/">Computer Vision Advisory Services</a> – Monthly advisory services to help you strategically plan your CV/ML capabilities, reduce the trial-and-error of model development, and get to market faster.</p>]]>
      </itunes:summary>
      <itunes:keywords>AI, startups, business, machine learning, computer vision, deep learning, impact, sustainability, healthcare, medical</itunes:keywords>
      <itunes:explicit>No</itunes:explicit>
      <podcast:transcript url="https://share.transistor.fm/s/ad66c3a9/transcript.txt" type="text/plain"/>
    </item>
    <item>
      <title>Ultrasound for Early Disease Detection with Kilian Koepsell from Caption Health</title>
      <itunes:episode>23</itunes:episode>
      <podcast:episode>23</podcast:episode>
      <itunes:title>Ultrasound for Early Disease Detection with Kilian Koepsell from Caption Health</itunes:title>
      <itunes:episodeType>full</itunes:episodeType>
      <guid isPermaLink="false">94310a21-6cea-43d3-938c-a746d5a1624f</guid>
      <link>https://pixelscientia.com/podcast/ultrasound-for-early-disease-detection-with-kilian-koepsell-from-caption-health/</link>
      <description>
        <![CDATA[<p>Today, I am joined by Kilian Koepsell, co-founder and Chief Innovation Officer of Caption Health. We’re taking on the multifaceted topic of ultrasound for early disease detection. Join us as Kilian talks about the problem Caption Health identified in the world of ultrasound use, and how he is working to solve it. Hear how he is using machine learning to help practitioners to guide and interpret ultrasound imaging, why his first point of entry was cardiac health, and where the role of the machine ends and the medical expert begins. Kilian shares some challenges he has faced along the way, and encourages anyone with a similar idea to approach the FDA sooner, rather than later. Tune in today to hear how his concept aims to support healthcare in a changing world, and how he sees the future of Caption Health unfolding. Thanks for listening!</p><p><br></p><p><strong>Key Points:</strong></p><ul><li>An introduction to Kilian Koepsell, co-founder and Chief Innovation Officer of Caption Health.</li><li>What Caption Health does and why it is important for imaging. </li><li>Why there was a hurdle to get ultrasound technology used by more people.</li><li>The two kinds of feedback Caption Health provides: guidance and interpretation.</li><li>How machine learning is used to perform these two functions.</li><li>Why their first focus is on the heart and why it is one of the most difficult organs to image. </li><li>Measurements taken by the machine for a practitioner to interpret.</li><li>How the quality meter works to guide the probe and gives practitioners the feedback and confidence they need.</li><li>Challenges in training machine learning on ultrasound imagery. 
</li><li>Validating models across many variations.</li><li>Why it is so important to take FDA considerations into account from the beginning.</li><li>How Kilian ensures that he is developing technology that will be of use to practitioners.</li><li>How the Caption Health vision has changed since its inception.</li><li>Having a high-level thesis to survive a changing world.</li><li>Where Kilian sees the impact of Caption Health in five years.</li></ul><p><strong>Quotes:</strong></p><p>“We realized that even though the hardware was available at a much lower cost to many more people, there was a big hurdle to get the ultrasound used by more people because it is actually very difficult to acquire good ultrasound images.” — Kilian Koepsell</p><p><br></p><p>“We use machine learning to understand the relationship between the imagery and the position of the probe in 3D space, and then guide the user to the right spot without the user having to even understand what they are looking at.” — Kilian Koepsell</p><p><br></p><p>“By just looking at the imagery you can see if the heart is not pumping well or if it’s enlarged or if the valves are not closing properly — all different kinds of structural heart diseases.” — Kilian Koepsell</p><p><br></p><p>“Normally you would require an expert to look over their shoulder and give them the feedback, but with this device, they can train themselves, and they get better over time by using it on patients.” — Kilian Koepsell</p><p><br></p><p>“Anyone who is trying something similar, I would encourage to get in contact with the FDA as early as possible.” — Kilian Koepsell</p><p><br></p><p><strong>Links:</strong></p><p><a href="https://captionhealth.com/">Caption Health</a></p><p><a href="https://www.linkedin.com/in/kilian-koepsell/">Kilian Koepsell on LinkedIn</a></p><p><a href="https://twitter.com/kiliankoepsell">Kilian Koepsell on Twitter</a></p><p><strong>Resources for Computer Vision Teams:</strong></p><p><a 
href="https://www.linkedin.com/in/hdcouture/">LinkedIn</a> – Connect with Heather.</p><p><a href="https://pixelscientia.com/newsletter/">Computer Vision Insights Newsletter</a> – A biweekly newsletter to help bring the latest machine learning and computer vision research to applications in people and planetary health.</p><p><a href="http://pixelscientia.com/services/strategy-session/">Computer Vision Strategy Session</a> – Not sure how to advance your computer vision project? Get unstuck with a clear set of next steps. Schedule a 1 hour strategy session now to advance your project.</p><p><a href="https://pixelscientia.com/services/advisory/">Computer Vision Advisory Services</a> – Monthly advisory services to help you strategically plan your CV/ML capabilities, reduce the trial-and-error of model development, and get to market faster.</p>]]>
      </description>
      <content:encoded>
        <![CDATA[<p>Today, I am joined by Kilian Koepsell, co-founder and Chief Innovation Officer of Caption Health. We’re taking on the multifaceted topic of ultrasound for early disease detection. Join us as Kilian talks about the problem Caption Health identified in the world of ultrasound use, and how he is working to solve it. Hear how he is using machine learning to help practitioners to guide and interpret ultrasound imaging, why his first point of entry was cardiac health, and where the role of the machine ends and the medical expert begins. Kilian shares some challenges he has faced along the way, and encourages anyone with a similar idea to approach the FDA sooner, rather than later. Tune in today to hear how his concept aims to support healthcare in a changing world, and how he sees the future of Caption Health unfolding. Thanks for listening!</p><p><br></p><p><strong>Key Points:</strong></p><ul><li>An introduction to Kilian Koepsell, co-founder and Chief Innovation Officer of Caption Health.</li><li>What Caption Health does and why it is important for imaging. </li><li>Why there was a hurdle to get ultrasound technology used by more people.</li><li>The two kinds of feedback Caption Health provides: guidance and interpretation.</li><li>How machine learning is used to perform these two functions.</li><li>Why their first focus is on the heart and why it is one of the most difficult organs to image. </li><li>Measurements taken by the machine for a practitioner to interpret.</li><li>How the quality meter works to guide the probe and gives practitioners the feedback and confidence they need.</li><li>Challenges in training machine learning on ultrasound imagery. 
</li><li>Validating models across many variations.</li><li>Why it is so important to take FDA considerations into account from the beginning.</li><li>How Kilian ensures that he is developing technology that will be of use to practitioners.</li><li>How the Caption Health vision has changed since its inception.</li><li>Having a high-level thesis to survive a changing world.</li><li>Where Kilian sees the impact of Caption Health in five years.</li></ul><p><strong>Quotes:</strong></p><p>“We realized that even though the hardware was available at a much lower cost to many more people, there was a big hurdle to get the ultrasound used by more people because it is actually very difficult to acquire good ultrasound images.” — Kilian Koepsell</p><p><br></p><p>“We use machine learning to understand the relationship between the imagery and the position of the probe in 3D space, and then guide the user to the right spot without the user having to even understand what they are looking at.” — Kilian Koepsell</p><p><br></p><p>“By just looking at the imagery you can see if the heart is not pumping well or if it’s enlarged or if the valves are not closing properly — all different kinds of structural heart diseases.” — Kilian Koepsell</p><p><br></p><p>“Normally you would require an expert to look over their shoulder and give them the feedback, but with this device, they can train themselves, and they get better over time by using it on patients.” — Kilian Koepsell</p><p><br></p><p>“Anyone who is trying something similar, I would encourage to get in contact with the FDA as early as possible.” — Kilian Koepsell</p><p><br></p><p><strong>Links:</strong></p><p><a href="https://captionhealth.com/">Caption Health</a></p><p><a href="https://www.linkedin.com/in/kilian-koepsell/">Kilian Koepsell on LinkedIn</a></p><p><a href="https://twitter.com/kiliankoepsell">Kilian Koepsell on Twitter</a></p><p><strong>Resources for Computer Vision Teams:</strong></p><p><a 
href="https://www.linkedin.com/in/hdcouture/">LinkedIn</a> – Connect with Heather.</p><p><a href="https://pixelscientia.com/newsletter/">Computer Vision Insights Newsletter</a> – A biweekly newsletter to help bring the latest machine learning and computer vision research to applications in people and planetary health.</p><p><a href="http://pixelscientia.com/services/strategy-session/">Computer Vision Strategy Session</a> – Not sure how to advance your computer vision project? Get unstuck with a clear set of next steps. Schedule a 1 hour strategy session now to advance your project.</p><p><a href="https://pixelscientia.com/services/advisory/">Computer Vision Advisory Services</a> – Monthly advisory services to help you strategically plan your CV/ML capabilities, reduce the trial-and-error of model development, and get to market faster.</p>]]>
      </content:encoded>
      <pubDate>Mon, 13 Mar 2023 06:00:00 -0400</pubDate>
      <author>Heather D. Couture</author>
      <enclosure url="https://2.gum.fm/op3.dev/e/pdcn.co/e/pdst.fm/e/dts.podtrac.com/redirect.mp3/media.transistor.fm/d58c370d/e622415c.mp3" length="35778313" type="audio/mpeg"/>
      <itunes:author>Heather D. Couture</itunes:author>
      <itunes:image href="https://img.transistor.fm/N0jQtsIOytKPaHtI_D7eA52-hwzJw0UengwZQYQhrnM/rs:fill:0:0:1/w:1400/h:1400/q:60/mb:500000/aHR0cHM6Ly9pbWct/dXBsb2FkLXByb2R1/Y3Rpb24udHJhbnNp/c3Rvci5mbS9lcGlz/b2RlLzEyNDAzNjMv/MTY3ODQwMDIwMi1h/cnR3b3JrLmpwZw.jpg"/>
      <itunes:duration>1486</itunes:duration>
      <itunes:summary>
        <![CDATA[<p>Today, I am joined by Kilian Koepsell, co-founder and Chief Innovation Officer of Caption Health. We’re taking on the multifaceted topic of ultrasound for early disease detection. Join us as Kilian talks about the problem Caption Health identified in the world of ultrasound use, and how he is working to solve it. Hear how he is using machine learning to help practitioners to guide and interpret ultrasound imaging, why his first point of entry was cardiac health, and where the role of the machine ends and the medical expert begins. Kilian shares some challenges he has faced along the way, and encourages anyone with a similar idea to approach the FDA sooner, rather than later. Tune in today to hear how his concept aims to support healthcare in a changing world, and how he sees the future of Caption Health unfolding. Thanks for listening!</p><p><br></p><p><strong>Key Points:</strong></p><ul><li>An introduction to Kilian Koepsell, co-founder and Chief Innovation Officer of Caption Health.</li><li>What Caption Health does and why it is important for imaging. </li><li>Why there was a hurdle to get ultrasound technology used by more people.</li><li>The two kinds of feedback Caption Health provides: guidance and interpretation.</li><li>How machine learning is used to perform these two functions.</li><li>Why their first focus is on the heart and why it is one of the most difficult organs to image. </li><li>Measurements taken by the machine for a practitioner to interpret.</li><li>How the quality meter works to guide the probe and gives practitioners the feedback and confidence they need.</li><li>Challenges in training machine learning on ultrasound imagery. 
</li><li>Validating models across many variations.</li><li>Why it is so important to take FDA considerations into account from the beginning.</li><li>How Kilian ensures that he is developing technology that will be of use to practitioners.</li><li>How the Caption Health vision has changed since its inception.</li><li>Having a high-level thesis to survive a changing world.</li><li>Where Kilian sees the impact of Caption Health in five years.</li></ul><p><strong>Quotes:</strong></p><p>“We realized that even though the hardware was available at a much lower cost to many more people, there was a big hurdle to get the ultrasound used by more people because it is actually very difficult to acquire good ultrasound images.” — Kilian Koepsell</p><p><br></p><p>“We use machine learning to understand the relationship between the imagery and the position of the probe in 3D space, and then guide the user to the right spot without the user having to even understand what they are looking at.” — Kilian Koepsell</p><p><br></p><p>“By just looking at the imagery you can see if the heart is not pumping well or if it’s enlarged or if the valves are not closing properly — all different kinds of structural heart diseases.” — Kilian Koepsell</p><p><br></p><p>“Normally you would require an expert to look over their shoulder and give them the feedback, but with this device, they can train themselves, and they get better over time by using it on patients.” — Kilian Koepsell</p><p><br></p><p>“Anyone who is trying something similar, I would encourage to get in contact with the FDA as early as possible.” — Kilian Koepsell</p><p><br></p><p><strong>Links:</strong></p><p><a href="https://captionhealth.com/">Caption Health</a></p><p><a href="https://www.linkedin.com/in/kilian-koepsell/">Kilian Koepsell on LinkedIn</a></p><p><a href="https://twitter.com/kiliankoepsell">Kilian Koepsell on Twitter</a></p><p><strong>Resources for Computer Vision Teams:</strong></p><p><a 
href="https://www.linkedin.com/in/hdcouture/">LinkedIn</a> – Connect with Heather.</p><p><a href="https://pixelscientia.com/newsletter/">Computer Vision Insights Newsletter</a> – A biweekly newsletter to help bring the latest machine learning and computer vision research to applications in people and planetary health.</p><p><a href="http://pixelscientia.com/services/strategy-session/">Computer Vision Strategy Session</a> – Not sure how to advance your computer vision project? Get unstuck with a clear set of next steps. Schedule a 1 hour strategy session now to advance your project.</p><p><a href="https://pixelscientia.com/services/advisory/">Computer Vision Advisory Services</a> – Monthly advisory services to help you strategically plan your CV/ML capabilities, reduce the trial-and-error of model development, and get to market faster.</p>]]>
      </itunes:summary>
      <itunes:keywords>AI, startups, business, machine learning, computer vision, deep learning, impact, sustainability, healthcare, medical</itunes:keywords>
      <itunes:explicit>No</itunes:explicit>
      <podcast:transcript url="https://share.transistor.fm/s/d58c370d/transcript.txt" type="text/plain"/>
    </item>
    <item>
      <title>Resilient Agriculture with Manal Elarab from Regrow Ag</title>
      <itunes:episode>22</itunes:episode>
      <podcast:episode>22</podcast:episode>
      <itunes:title>Resilient Agriculture with Manal Elarab from Regrow Ag</itunes:title>
      <itunes:episodeType>full</itunes:episodeType>
      <guid isPermaLink="false">dea3f746-6cf8-4a18-9482-970f3e7699a1</guid>
      <link>https://pixelscientia.com/podcast/resilient-agriculture-with-manal-elarab-from-regrow-ag/</link>
      <description>
        <![CDATA[<p>Climate change is impacting many industries across the globe, and the farming and agriculture industry is no exception. Changes in rainfall patterns, temperature, and increasingly extreme weather place farmers under immense strain and threaten food security. However, there is a solution, and Manal Elarab, the COO at Regrow Ag, is here to explain how Regrow is changing the farming industry.</p><p>Regrow Ag aims to empower food and agriculture industries to adopt, scale, and monetize resilient and regenerative agricultural practices. We start by learning about Manal, her professional career journey, the how and why behind Regrow Ag, and the company’s overall mission. We then discuss why agricultural practices need to change, and unpack the complex relationship climate change has with agriculture. Hear about how Regrow Ag is leveraging machine learning to enhance regenerative farming, what makes regenerative farming practices different, the different technological toolkits Regrow Ag has developed, and more. 
Tune in to discover how technology is being used to revolutionize the farming industry with Manal Elarab from Regrow Ag!</p><p><br></p><p><strong>Key Points:</strong></p><ul><li>Hear about Manal’s professional career journey and what led her to Regrow Ag.</li><li>What Regrow Ag does and why it is important for agriculture and climate change.</li><li>Learn about regenerative agriculture and how it differs from other forms of agriculture.</li><li>How Regrow Ag leverages machine learning to help achieve its mission.</li><li>Manal explains what data is collected, how it is collected, and how it is used.</li><li>Overview of the challenges encountered when working with remote sensing data.</li><li>Learn how the models used can account for different types of variations in data.</li><li>How Regrow Ag engineers collaborate with other experts in order to get the required data.</li><li>Their approach to measuring the impact of the technology and solutions implemented.</li><li>Manal shares advice and insights for leaders of AI-powered startups.</li><li>What to expect in the near future from Regrow Ag.</li></ul><p><br></p><p><strong>Quotes:</strong></p><p>“Compared to other industries, agriculture almost finds itself on both sides of the climate change equation. It is what agriculture is doing to the climate and what climate change is doing to agriculture.” — Manal Elarab</p><p><br></p><p>“Machine learning plays a big role at Regrow. Our core offering is built on two key elements. 
A process-driven carbon model and the machine learning-based toolkit model.” — Manal Elarab</p><p><br></p><p>“I would say machine learning is a tool that powers a product.” — Manal Elarab</p><p><br></p><p>“I imagine myself and my kids walking down the grocery aisle, picking up snacks and cereal boxes, and pasta, and selecting products that the growers have used regenerative practices in producing those crops.” — Manal Elarab</p><p><br></p><p><strong>Links:</strong></p><p><a href="https://www.linkedin.com/in/digitalag/">Manal Elarab on LinkedIn</a></p><p><a href="https://www.regrow.ag">Regrow Ag</a></p><p><a href="https://www.regrow.ag/resources/blog">Regrow Ag Blog</a></p><p><strong>Resources for Computer Vision Teams:</strong></p><p><a href="https://www.linkedin.com/in/hdcouture/">LinkedIn</a> – Connect with Heather.</p><p><a href="https://pixelscientia.com/newsletter/">Computer Vision Insights Newsletter</a> – A biweekly newsletter to help bring the latest machine learning and computer vision research to applications in people and planetary health.</p><p><a href="http://pixelscientia.com/services/strategy-session/">Computer Vision Strategy Session</a> – Not sure how to advance your computer vision project? Get unstuck with a clear set of next steps. Schedule a 1 hour strategy session now to advance your project.</p><p><a href="https://pixelscientia.com/services/advisory/">Computer Vision Advisory Services</a> – Monthly advisory services to help you strategically plan your CV/ML capabilities, reduce the trial-and-error of model development, and get to market faster.</p>]]>
      </description>
      <content:encoded>
        <![CDATA[<p>Climate change is impacting many industries across the globe, and the farming and agriculture industry is no exception. Changes in rainfall patterns, temperature, and increasingly extreme weather place farmers under immense strain and threaten food security. However, there is a solution, and Manal Elarab, the COO at Regrow Ag, is here to explain how Regrow is changing the farming industry.</p><p>Regrow Ag aims to empower food and agriculture industries to adopt, scale, and monetize resilient and regenerative agricultural practices. We start by learning about Manal, her professional career journey, the how and why behind Regrow Ag, and the company’s overall mission. We then discuss why agricultural practices need to change, and unpack the complex relationship climate change has with agriculture. Hear about how Regrow Ag is leveraging machine learning to enhance regenerative farming, what makes regenerative farming practices different, the different technological toolkits Regrow Ag has developed, and more. 
Tune in to discover how technology is being used to revolutionize the farming industry with Manal Elarab from Regrow Ag!</p><p><br></p><p><strong>Key Points:</strong></p><ul><li>Hear about Manal’s professional career journey and what led her to Regrow Ag.</li><li>What Regrow Ag does and why it is important for agriculture and climate change.</li><li>Learn about regenerative agriculture and how it differs from other forms of agriculture.</li><li>How Regrow Ag leverages machine learning to help achieve its mission.</li><li>Manal explains what data is collected, how it is collected, and how it is used.</li><li>Overview of the challenges encountered when working with remote sensing data.</li><li>Learn how the models used can account for different types of variations in data.</li><li>How Regrow Ag engineers collaborate with other experts in order to get the required data.</li><li>Their approach to measuring the impact of the technology and solutions implemented.</li><li>Manal shares advice and insights for leaders of AI-powered startups.</li><li>What to expect in the near future from Regrow Ag.</li></ul><p><br></p><p><strong>Quotes:</strong></p><p>“Compared to other industries, agriculture almost finds itself on both sides of the climate change equation. It is what agriculture is doing to the climate and what climate change is doing to agriculture.” — Manal Elarab</p><p><br></p><p>“Machine learning plays a big role at Regrow. Our core offering is built on two key elements. 
A process-driven carbon model and the machine learning-based toolkit model.” — Manal Elarab</p><p><br></p><p>“I would say machine learning is a tool that powers a product.” — Manal Elarab</p><p><br></p><p>“I imagine myself and my kids walking down the grocery aisle, picking up snacks and cereal boxes, and pasta, and selecting products that the growers have used regenerative practices in producing those crops.” — Manal Elarab</p><p><br></p><p><strong>Links:</strong></p><p><a href="https://www.linkedin.com/in/digitalag/">Manal Elarab on LinkedIn</a></p><p><a href="https://www.regrow.ag">Regrow Ag</a></p><p><a href="https://www.regrow.ag/resources/blog">Regrow Ag Blog</a></p><p><strong>Resources for Computer Vision Teams:</strong></p><p><a href="https://www.linkedin.com/in/hdcouture/">LinkedIn</a> – Connect with Heather.</p><p><a href="https://pixelscientia.com/newsletter/">Computer Vision Insights Newsletter</a> – A biweekly newsletter to help bring the latest machine learning and computer vision research to applications in people and planetary health.</p><p><a href="http://pixelscientia.com/services/strategy-session/">Computer Vision Strategy Session</a> – Not sure how to advance your computer vision project? Get unstuck with a clear set of next steps. Schedule a 1 hour strategy session now to advance your project.</p><p><a href="https://pixelscientia.com/services/advisory/">Computer Vision Advisory Services</a> – Monthly advisory services to help you strategically plan your CV/ML capabilities, reduce the trial-and-error of model development, and get to market faster.</p>]]>
      </content:encoded>
      <pubDate>Mon, 06 Mar 2023 06:00:00 -0500</pubDate>
      <author>Heather D. Couture</author>
      <enclosure url="https://2.gum.fm/op3.dev/e/pdcn.co/e/pdst.fm/e/dts.podtrac.com/redirect.mp3/media.transistor.fm/5eafd018/9d6cfd3f.mp3" length="26061040" type="audio/mpeg"/>
      <itunes:author>Heather D. Couture</itunes:author>
      <itunes:image href="https://img.transistor.fm/Y1PGzdkfw0fSyuPRPyVtwZNPNOpdtmsZtei-jNmwyng/rs:fill:0:0:1/w:1400/h:1400/q:60/mb:500000/aHR0cHM6Ly9pbWct/dXBsb2FkLXByb2R1/Y3Rpb24udHJhbnNp/c3Rvci5mbS9lcGlz/b2RlLzEyMDk4MzMv/MTY3NjU5NTc0My1h/cnR3b3JrLmpwZw.jpg"/>
      <itunes:duration>1078</itunes:duration>
      <itunes:summary>
        <![CDATA[<p>Climate change is impacting many industries across the globe, and the farming and agriculture industry is no exception. Changes in rainfall patterns, temperature, and increasingly extreme weather place farmers under immense strain and threaten food security. However, there is a solution, and Manal Elarab, the COO at Regrow Ag, is here to explain how Regrow is changing the farming industry.</p><p>Regrow Ag aims to empower food and agriculture industries to adopt, scale, and monetize resilient and regenerative agricultural practices. We start by learning about Manal, her professional career journey, the how and why behind Regrow Ag, and the company’s overall mission. We then discuss why agricultural practices need to change, and unpack the complex relationship climate change has with agriculture. Hear about how Regrow Ag is leveraging machine learning to enhance regenerative farming, what makes regenerative farming practices different, the different technological toolkits Regrow Ag has developed, and more. 
Tune in to discover how technology is being used to revolutionize the farming industry with Manal Elarab from Regrow Ag!</p><p><br></p><p><strong>Key Points:</strong></p><ul><li>Hear about Manal’s professional career journey and what led her to Regrow Ag.</li><li>What Regrow Ag does and why it is important for agriculture and climate change.</li><li>Learn about regenerative agriculture and how it differs from other forms of agriculture.</li><li>How Regrow Ag leverages machine learning to help achieve its mission.</li><li>Manal explains what data is collected, how it is collected, and how it is used.</li><li>Overview of the challenges encountered when working with remote sensing data.</li><li>Learn how the models used can account for different types of variations in data.</li><li>How Regrow Ag engineers collaborate with other experts in order to get the required data.</li><li>Their approach to measuring the impact of the technology and solutions implemented.</li><li>Manal shares advice and insights for leaders of AI-powered startups.</li><li>What to expect in the near future from Regrow Ag.</li></ul><p><br></p><p><strong>Quotes:</strong></p><p>“Compared to other industries, agriculture almost finds itself on both sides of the climate change equation. It is what agriculture is doing to the climate and what climate change is doing to agriculture.” — Manal Elarab</p><p><br></p><p>“Machine learning plays a big role at Regrow. Our core offering is built on two key elements. 
A process-driven carbon model and the machine learning-based toolkit model.” — Manal Elarab</p><p><br></p><p>“I would say machine learning is a tool that powers a product.” — Manal Elarab</p><p><br></p><p>“I imagine myself and my kids walking down the grocery aisle, picking up snacks and cereal boxes, and pasta, and selecting products that the growers have used regenerative practices in producing those crops.” — Manal Elarab</p><p><br></p><p><strong>Links:</strong></p><p><a href="https://www.linkedin.com/in/digitalag/">Manal Elarab on LinkedIn</a></p><p><a href="https://www.regrow.ag">Regrow Ag</a></p><p><a href="https://www.regrow.ag/resources/blog">Regrow Ag Blog</a></p><p><strong>Resources for Computer Vision Teams:</strong></p><p><a href="https://www.linkedin.com/in/hdcouture/">LinkedIn</a> – Connect with Heather.</p><p><a href="https://pixelscientia.com/newsletter/">Computer Vision Insights Newsletter</a> – A biweekly newsletter to help bring the latest machine learning and computer vision research to applications in people and planetary health.</p><p><a href="http://pixelscientia.com/services/strategy-session/">Computer Vision Strategy Session</a> – Not sure how to advance your computer vision project? Get unstuck with a clear set of next steps. Schedule a 1 hour strategy session now to advance your project.</p><p><a href="https://pixelscientia.com/services/advisory/">Computer Vision Advisory Services</a> – Monthly advisory services to help you strategically plan your CV/ML capabilities, reduce the trial-and-error of model development, and get to market faster.</p>]]>
      </itunes:summary>
      <itunes:keywords>AI, startups, business, machine learning, computer vision, deep learning, impact, sustainability, healthcare, medical</itunes:keywords>
      <itunes:explicit>No</itunes:explicit>
      <podcast:transcript url="https://share.transistor.fm/s/5eafd018/transcript.txt" type="text/plain"/>
    </item>
    <item>
      <title>Identifying Mental Health Challenges through Speech with Rima Seiilova-Olson from Kintsugi</title>
      <itunes:episode>21</itunes:episode>
      <podcast:episode>21</podcast:episode>
      <itunes:title>Identifying Mental Health Challenges through Speech with Rima Seiilova-Olson from Kintsugi</itunes:title>
      <itunes:episodeType>full</itunes:episodeType>
      <guid isPermaLink="false">69f56d8b-d129-4ac3-9970-71a7684a894d</guid>
      <link>https://pixelscientia.com/podcast/identifying-mental-health-challenges-through-speech-with-rima-seiilova-olson-from-kintsugi/</link>
      <description>
        <![CDATA[<p>Over the past few years, there has been a concerning rise in rates of depression, anxiety, and other mental health disorders, especially among adolescents and young adults. In addition, the current state of our national healthcare system is not set up to offer equitable access to all, depriving a huge portion of the population of the help they need. The new platform, Kintsugi, has been taking important steps to address these shortcomings by developing AI learning models that detect signs of depression and anxiety from speech samples.</p><p>Today on the show I welcome Rima Seiilova-Olson, Co-Founder and Chief Scientist at Kintsugi, to talk about the current state of mental health care and what Kintsugi is doing to offer support to the many individuals who need it. I talk with Rima about her difficult experience with postpartum depression, her subsequent struggle to access mental health care, and how these events (combined with her expertise as a software engineer) led her to co-found Kintsugi. Rima goes on to explain how Kintsugi can be used as a tool by mental health professionals, and the benefits of incorporating it into clinical workflows. We also discuss some of the biggest challenges of working with speech data, the systems they are putting in place to combat bias, and how psychiatrists are helping them validate their speech data. 
To learn more about this incredible technology, and the life-altering impact it could have, be sure to tune in today!</p><p><br></p><p><strong>Key Points:</strong></p><ul><li>Get to know today’s guest, Rima Seiilova-Olson, Co-Founder and Chief Scientist at Kintsugi.</li><li>The set of circumstances that inspired Rima to create Kintsugi.</li><li>What it means to bring quantifiable and scientific measures into the field of mental health.</li><li>How Kintsugi detects signs of depression and anxiety from speech samples.</li><li>Understanding the benefits of incorporating this technology into clinical workflows.</li><li>The integral role of machine learning in this technology.</li><li>The limitations of traditional healthcare systems.</li><li>How Kintsugi is helping more people gain access to mental health care.</li><li>Why Kintsugi uses psychiatrists to collect and validate data.</li><li>The biases that can occur in models trained on speech data.</li><li>The measures Kintsugi has put in place to mitigate these biases.</li><li>Some of the biggest challenges of working with speech data.</li><li>Rima’s insights on the potential financial, clinical, and emotional impacts of this new technology.</li><li>Rima’s advice to other founders of AI-powered startups.</li><li>Why it’s so important to combat the trend of digital health companies prioritizing financial gain over clinical impact.</li><li>The expected impact of Kintsugi over the next five years.</li></ul><p><br></p><p><strong>Quotes:</strong></p><p>“My co-founder and CEO, Grace Chang, and I put our heads together and decided to start by bringing quantifiable and scientific measures into the field of mental health. 
Which has mainly been qualitatively and subjectively driven for many decades.” — Rima Seiilova-Olson</p><p><br></p><p>“Instead of these clunky tools, we give [healthcare providers] seamless tools that can analyze depression or anxiety in their patients.” — Rima Seiilova-Olson</p><p><br></p><p>“I think the main impact that we, as founders, are excited about, is the emotional impact of our technology. Which is not quantifiable, but we believe it is going to be immense.” — Rima Seiilova-Olson</p><p><br></p><p>“By connecting patients to access, we’re going to have a profound effect on society. [A society] that is observing skyrocketing trends in the rates of depression and anxiety, especially among young adults and adolescents.” — Rima Seiilova-Olson</p><p><br></p><p>“We’re observing interesting trends where certain companies prioritize financial gains and revenue over clinical impact and the clinical outcomes for the patient. And those stories not only affect that one startup, it affects the whole industry.” — Rima Seiilova-Olson</p><p><br></p><p>“I think every single AI startup in healthcare needs to prioritize the ethical implications of their product. 
So that as an industry, we cover some of the damage that has been done by some of our colleagues.” — Rima Seiilova-Olson</p><p><br></p><p><strong>Links:</strong></p><p><a href="https://www.linkedin.com/in/rima-seiilova-olson/">Rima Seiilova-Olson on LinkedIn</a></p><p><a href="https://www.kintsugihealth.com/">Kintsugi</a></p><p><a href="https://apps.apple.com/us/app/kintsugi/id1418031227">Kintsugi Journaling App</a></p><p><strong>Resources for Computer Vision Teams:</strong></p><p><a href="https://www.linkedin.com/in/hdcouture/">LinkedIn</a> – Connect with Heather.</p><p><a href="https://pixelscientia.com/newsletter/">Computer Vision Insights Newsletter</a> – A biweekly newsletter to help bring the latest machine learning and computer vision research to applications in people and planetary health.</p><p><a href="http://pixelscientia.com/services/strategy-session/">Computer Vision Strategy Session</a> – Not sure how to advance your computer vision project? Get unstuck with a clear set of next steps. Schedule a 1 hour strategy session now to advance your project.</p><p><a href="https://pixelscientia.com/services/advisory/">Computer Vision Advisory Services</a> – Monthly advisory services to help you strategically plan your CV/ML capabilities, reduce the trial-and-error of model development, and get to market faster.</p>]]>
      </description>
      <content:encoded>
        <![CDATA[<p>Over the past few years, there has been a concerning rise in rates of depression, anxiety, and other mental health disorders, especially among adolescents and young adults. In addition, the current state of our national healthcare system is not set up to offer equitable access to all, depriving a huge portion of the population of the help they need. The new platform, Kintsugi, has been taking important steps to address these shortcomings by developing AI learning models that detect signs of depression and anxiety from speech samples.</p><p>Today on the show I welcome Rima Seiilova-Olson, Co-Founder and Chief Scientist at Kintsugi, to talk about the current state of mental health care and what Kintsugi is doing to offer support to the many individuals who need it. I talk with Rima about her difficult experience with postpartum depression, her subsequent struggle to access mental health care, and how these events (combined with her expertise as a software engineer) led her to co-found Kintsugi. Rima goes on to explain how Kintsugi can be used as a tool by mental health professionals, and the benefits of incorporating it into clinical workflows. We also discuss some of the biggest challenges of working with speech data, the systems they are putting in place to combat bias, and how psychiatrists are helping them validate their speech data. 
To learn more about this incredible technology, and the life-altering impact it could have, be sure to tune in today!</p><p><br></p><p><strong>Key Points:</strong></p><ul><li>Get to know today’s guest, Rima Seiilova-Olson, Co-Founder and Chief Scientist at Kintsugi.</li><li>The set of circumstances that inspired Rima to create Kintsugi.</li><li>What it means to bring quantifiable and scientific measures into the field of mental health.</li><li>How Kintsugi detects signs of depression and anxiety from speech samples.</li><li>Understanding the benefits of incorporating this technology into clinical workflows.</li><li>The integral role of machine learning in this technology.</li><li>The limitations of traditional healthcare systems.</li><li>How Kintsugi is helping more people gain access to mental health care.</li><li>Why Kintsugi uses psychiatrists to collect and validate data.</li><li>The biases that can occur in models trained on speech data.</li><li>The measures Kintsugi has put in place to mitigate these biases.</li><li>Some of the biggest challenges of working with speech data.</li><li>Rima’s insights on the potential financial, clinical, and emotional impacts of this new technology.</li><li>Rima’s advice to other founders of AI-powered startups.</li><li>Why it’s so important to combat the trend of digital health companies prioritizing financial gain over clinical impact.</li><li>The expected impact of Kintsugi over the next five years.</li></ul><p><br></p><p><strong>Quotes:</strong></p><p>“My co-founder and CEO, Grace Chang, and I put our heads together and decided to start by bringing quantifiable and scientific measures into the field of mental health. 
Which has mainly been qualitatively and subjectively driven for many decades.” — Rima Seiilova-Olson</p><p><br></p><p>“Instead of these clunky tools, we give [healthcare providers] seamless tools that can analyze depression or anxiety in their patients.” — Rima Seiilova-Olson</p><p><br></p><p>“I think the main impact that we, as founders, are excited about, is the emotional impact of our technology. Which is not quantifiable, but we believe it is going to be immense.” — Rima Seiilova-Olson</p><p><br></p><p>“By connecting patients to access, we’re going to have a profound effect on society. [A society] that is observing skyrocketing trends in the rates of depression and anxiety, especially among young adults and adolescents.” — Rima Seiilova-Olson</p><p><br></p><p>“We’re observing interesting trends where certain companies prioritize financial gains and revenue over clinical impact and the clinical outcomes for the patient. And those stories not only affect that one startup, it affects the whole industry.” — Rima Seiilova-Olson</p><p><br></p><p>“I think every single AI startup in healthcare needs to prioritize the ethical implications of their product. 
So that as an industry, we cover some of the damage that has been done by some of our colleagues.” — Rima Seiilova-Olson</p><p><br></p><p><strong>Links:</strong></p><p><a href="https://www.linkedin.com/in/rima-seiilova-olson/">Rima Seiilova-Olson on LinkedIn</a></p><p><a href="https://www.kintsugihealth.com/">Kintsugi</a></p><p><a href="https://apps.apple.com/us/app/kintsugi/id1418031227">Kintsugi Journaling App</a></p><p><strong>Resources for Computer Vision Teams:</strong></p><p><a href="https://www.linkedin.com/in/hdcouture/">LinkedIn</a> – Connect with Heather.</p><p><a href="https://pixelscientia.com/newsletter/">Computer Vision Insights Newsletter</a> – A biweekly newsletter to help bring the latest machine learning and computer vision research to applications in people and planetary health.</p><p><a href="http://pixelscientia.com/services/strategy-session/">Computer Vision Strategy Session</a> – Not sure how to advance your computer vision project? Get unstuck with a clear set of next steps. Schedule a 1 hour strategy session now to advance your project.</p><p><a href="https://pixelscientia.com/services/advisory/">Computer Vision Advisory Services</a> – Monthly advisory services to help you strategically plan your CV/ML capabilities, reduce the trial-and-error of model development, and get to market faster.</p>]]>
      </content:encoded>
      <pubDate>Mon, 27 Feb 2023 06:00:00 -0500</pubDate>
      <author>Heather D. Couture</author>
      <enclosure url="https://2.gum.fm/op3.dev/e/pdcn.co/e/pdst.fm/e/dts.podtrac.com/redirect.mp3/media.transistor.fm/2b4d8cea/6e9b5671.mp3" length="43239245" type="audio/mpeg"/>
      <itunes:author>Heather D. Couture</itunes:author>
      <itunes:image href="https://img.transistor.fm/-wrxHZcPeXkOFLIH4oNs77gjY2V8cZ3RN6nIuuLhFXw/rs:fill:0:0:1/w:1400/h:1400/q:60/mb:500000/aHR0cHM6Ly9pbWct/dXBsb2FkLXByb2R1/Y3Rpb24udHJhbnNp/c3Rvci5mbS9lcGlz/b2RlLzEyMDk4MzAv/MTY3NjU5NTYzNC1h/cnR3b3JrLmpwZw.jpg"/>
      <itunes:duration>1796</itunes:duration>
      <itunes:summary>
        <![CDATA[<p>Over the past few years, there has been a concerning rise in rates of depression, anxiety, and other mental health disorders, especially among adolescents and young adults. In addition, the current state of our national healthcare system is not set up to offer equitable access to all, depriving a huge portion of the population of the help they need. The new platform, Kintsugi, has been taking important steps to address these shortcomings by developing AI learning models that detect signs of depression and anxiety from speech samples.</p><p>Today on the show I welcome Rima Seiilova-Olson, Co-Founder and Chief Scientist at Kintsugi, to talk about the current state of mental health care and what Kintsugi is doing to offer support to the many individuals who need it. I talk with Rima about her difficult experience with postpartum depression, her subsequent struggle to access mental health care, and how these events (combined with her expertise as a software engineer) led her to co-found Kintsugi. Rima goes on to explain how Kintsugi can be used as a tool by mental health professionals, and the benefits of incorporating it into clinical workflows. We also discuss some of the biggest challenges of working with speech data, the systems they are putting in place to combat bias, and how psychiatrists are helping them validate their speech data. 
To learn more about this incredible technology, and the life-altering impact it could have, be sure to tune in today!</p><p><br></p><p><strong>Key Points:</strong></p><ul><li>Get to know today’s guest, Rima Seiilova-Olson, Co-Founder and Chief Scientist at Kintsugi.</li><li>The set of circumstances that inspired Rima to create Kintsugi.</li><li>What it means to bring quantifiable and scientific measures into the field of mental health.</li><li>How Kintsugi detects signs of depression and anxiety from speech samples.</li><li>Understanding the benefits of incorporating this technology into clinical workflows.</li><li>The integral role of machine learning in this technology.</li><li>The limitations of traditional healthcare systems.</li><li>How Kintsugi is helping more people gain access to mental health care.</li><li>Why Kintsugi uses psychiatrists to collect and validate data.</li><li>The biases that can occur in models trained on speech data.</li><li>The measures Kintsugi has put in place to mitigate these biases.</li><li>Some of the biggest challenges of working with speech data.</li><li>Rima’s insights on the potential financial, clinical, and emotional impacts of this new technology.</li><li>Rima’s advice to other founders of AI-powered startups.</li><li>Why it’s so important to combat the trend of digital health companies prioritizing financial gain over clinical impact.</li><li>The expected impact of Kintsugi over the next five years.</li></ul><p><br></p><p><strong>Quotes:</strong></p><p>“My co-founder and CEO, Grace Chang, and I put our heads together and decided to start by bringing quantifiable and scientific measures into the field of mental health. 
Which has mainly been qualitatively and subjectively driven for many decades.” — Rima Seiilova-Olson</p><p><br></p><p>“Instead of these clunky tools, we give [healthcare providers] seamless tools that can analyze depression or anxiety in their patients.” — Rima Seiilova-Olson</p><p><br></p><p>“I think the main impact that we, as founders, are excited about, is the emotional impact of our technology. Which is not quantifiable, but we believe it is going to be immense.” — Rima Seiilova-Olson</p><p><br></p><p>“By connecting patients to access, we’re going to have a profound effect on society. [A society] that is observing skyrocketing trends in the rates of depression and anxiety, especially among young adults and adolescents.” — Rima Seiilova-Olson</p><p><br></p><p>“We’re observing interesting trends where certain companies prioritize financial gains and revenue over clinical impact and the clinical outcomes for the patient. And those stories not only affect that one startup, it affects the whole industry.” — Rima Seiilova-Olson</p><p><br></p><p>“I think every single AI startup in healthcare needs to prioritize the ethical implications of their product. 
So that as an industry, we cover some of the damage that has been done by some of our colleagues.” — Rima Seiilova-Olson</p><p><br></p><p><strong>Links:</strong></p><p><a href="https://www.linkedin.com/in/rima-seiilova-olson/">Rima Seiilova-Olson on LinkedIn</a></p><p><a href="https://www.kintsugihealth.com/">Kintsugi</a></p><p><a href="https://apps.apple.com/us/app/kintsugi/id1418031227">Kintsugi Journaling App</a></p><p><strong>Resources for Computer Vision Teams:</strong></p><p><a href="https://www.linkedin.com/in/hdcouture/">LinkedIn</a> – Connect with Heather.</p><p><a href="https://pixelscientia.com/newsletter/">Computer Vision Insights Newsletter</a> – A biweekly newsletter to help bring the latest machine learning and computer vision research to applications in people and planetary health.</p><p><a href="http://pixelscientia.com/services/strategy-session/">Computer Vision Strategy Session</a> – Not sure how to advance your computer vision project? Get unstuck with a clear set of next steps. Schedule a 1 hour strategy session now to advance your project.</p><p><a href="https://pixelscientia.com/services/advisory/">Computer Vision Advisory Services</a> – Monthly advisory services to help you strategically plan your CV/ML capabilities, reduce the trial-and-error of model development, and get to market faster.</p>]]>
      </itunes:summary>
      <itunes:keywords>AI, startups, business, machine learning, computer vision, deep learning, impact, sustainability, healthcare, medical</itunes:keywords>
      <itunes:explicit>No</itunes:explicit>
      <podcast:transcript url="https://share.transistor.fm/s/2b4d8cea/transcript.txt" type="text/plain"/>
    </item>
    <item>
      <title>Automated Emissions Reduction with Gavin McCormick from WattTime</title>
      <itunes:episode>20</itunes:episode>
      <podcast:episode>20</podcast:episode>
      <itunes:title>Automated Emissions Reduction with Gavin McCormick from WattTime</itunes:title>
      <itunes:episodeType>full</itunes:episodeType>
      <guid isPermaLink="false">4479eff1-5c75-40f5-b3cf-8cc797b9f31d</guid>
      <link>https://pixelscientia.com/podcast/automated-emissions-reduction-with-gavin-mccormick-from-watttime/</link>
      <description>
        <![CDATA[<p>Creating a lasting impact on a large scale requires collaboration, and globally reducing emissions is one of the most impactful, large-scale goals one could have at this point in time.</p><p>My guest on today’s show is Gavin McCormick, an economist by training who has immersed himself in the world of machine learning through the founding of WattTime, a company fighting climate change by automating devices to optimize energy in real-time in order to lower their carbon footprints. Currently, there are one billion devices making use of WattTime's technology, and in the next few years, Gavin hopes to increase that number to 30 billion!</p><p>Tune in today to hear about the uncommon way that Gavin and his team approach their goals, the Climate TRACE project that has come about as a result of WattTime’s success, and why they don’t have any competitors! </p><p><br></p><p><strong>Key Points:</strong></p><ul><li>Gavin’s educational background.</li><li>The discovery that led Gavin to found WattTime.</li><li>An explanation of what WattTime does.</li><li>The motivation behind the founding of Climate TRACE.</li><li>The role that machine learning plays at WattTime. </li><li>The single objective function of WattTime.</li><li>Methods that Gavin and his team use to ensure their actions are having the greatest impact.</li><li>How the WattTime approach differs from the approach utilized by many other machine learning organizations.</li><li>The importance of interdisciplinary collaboration.</li><li>How WattTime measures their impact.</li><li>One of WattTime’s major weaknesses.</li><li>Advice for leaders of AI-powered organizations.</li><li>What Gavin hopes WattTime will achieve in the next three to five years.  
</li></ul><p><br></p><p><strong>Quotes:</strong></p><p>“Renewable energy could have more impact if it could be sited in just the right locations and run at just the right times.” — Gavin McCormick</p><p><br></p><p>“You can really substantially reduce the carbon footprint of a power grid by shifting all of the load to moments when there's surplus clean energy instead of just random times.” — Gavin McCormick</p><p><br></p><p>“In any case where another organization, be they non-profit, university, for-profit, whatever, is rowing in a direction that is consistent with our mission and frankly doing it well, we would rather not spend the time and resources trying to recreate them or beat them. We would rather help them out.” — Gavin McCormick</p><p><br></p><p>“If you're really serious about impact, then someone else's success is not a threat to you. It's a benefit.” — Gavin McCormick</p><p><br></p><p><strong>Links Mentioned in Today’s Episode:</strong></p><p><a href="https://www.linkedin.com/in/gavinmccormick/">Gavin McCormick on LinkedIn</a></p><p><a href="https://www.watttime.org/">WattTime</a></p><p><a href="https://climatetrace.org/">Climate TRACE</a></p><p><strong>Resources for Computer Vision Teams:</strong></p><p><a href="https://www.linkedin.com/in/hdcouture/">LinkedIn</a> – Connect with Heather.</p><p><a href="https://pixelscientia.com/newsletter/">Computer Vision Insights Newsletter</a> – A biweekly newsletter to help bring the latest machine learning and computer vision research to applications in people and planetary health.</p><p><a href="http://pixelscientia.com/services/strategy-session/">Computer Vision Strategy Session</a> – Not sure how to advance your computer vision project? Get unstuck with a clear set of next steps. 
Schedule a 1 hour strategy session now to advance your project.</p><p><a href="https://pixelscientia.com/services/advisory/">Computer Vision Advisory Services</a> – Monthly advisory services to help you strategically plan your CV/ML capabilities, reduce the trial-and-error of model development, and get to market faster.</p>]]>
      </description>
      <content:encoded>
        <![CDATA[<p>Creating a lasting impact on a large scale requires collaboration, and globally reducing emissions is one of the most impactful, large-scale goals one could have at this point in time.</p><p>My guest on today’s show is Gavin McCormick, an economist by training who has immersed himself in the world of machine learning through the founding of WattTime, a company fighting climate change by automating devices to optimize energy in real-time in order to lower their carbon footprints. Currently, there are one billion devices making use of WattTime's technology, and in the next few years, Gavin hopes to increase that number to 30 billion!</p><p>Tune in today to hear about the uncommon way that Gavin and his team approach their goals, the Climate TRACE project that has come about as a result of WattTime’s success, and why they don’t have any competitors! </p><p><br></p><p><strong>Key Points:</strong></p><ul><li>Gavin’s educational background.</li><li>The discovery that led Gavin to found WattTime.</li><li>An explanation of what WattTime does.</li><li>The motivation behind the founding of Climate TRACE.</li><li>The role that machine learning plays at WattTime. </li><li>The single objective function of WattTime.</li><li>Methods that Gavin and his team use to ensure their actions are having the greatest impact.</li><li>How the WattTime approach differs from the approach utilized by many other machine learning organizations.</li><li>The importance of interdisciplinary collaboration.</li><li>How WattTime measures their impact.</li><li>One of WattTime’s major weaknesses.</li><li>Advice for leaders of AI-powered organizations.</li><li>What Gavin hopes WattTime will achieve in the next three to five years.  
</li></ul><p><br></p><p><strong>Quotes:</strong></p><p>“Renewable energy could have more impact if it could be sited in just the right locations and run at just the right times.” — Gavin McCormick</p><p><br></p><p>“You can really substantially reduce the carbon footprint of a power grid by shifting all of the load to moments when there's surplus clean energy instead of just random times.” — Gavin McCormick</p><p><br></p><p>“In any case where another organization, be they non-profit, university, for-profit, whatever, is rowing in a direction that is consistent with our mission and frankly doing it well, we would rather not spend the time and resources trying to recreate them or beat them. We would rather help them out.” — Gavin McCormick</p><p><br></p><p>“If you're really serious about impact, then someone else's success is not a threat to you. It's a benefit.” — Gavin McCormick</p><p><br></p><p><strong>Links Mentioned in Today’s Episode:</strong></p><p><a href="https://www.linkedin.com/in/gavinmccormick/">Gavin McCormick on LinkedIn</a></p><p><a href="https://www.watttime.org/">WattTime</a></p><p><a href="https://climatetrace.org/">Climate TRACE</a></p><p><strong>Resources for Computer Vision Teams:</strong></p><p><a href="https://www.linkedin.com/in/hdcouture/">LinkedIn</a> – Connect with Heather.</p><p><a href="https://pixelscientia.com/newsletter/">Computer Vision Insights Newsletter</a> – A biweekly newsletter to help bring the latest machine learning and computer vision research to applications in people and planetary health.</p><p><a href="http://pixelscientia.com/services/strategy-session/">Computer Vision Strategy Session</a> – Not sure how to advance your computer vision project? Get unstuck with a clear set of next steps. 
Schedule a 1 hour strategy session now to advance your project.</p><p><a href="https://pixelscientia.com/services/advisory/">Computer Vision Advisory Services</a> – Monthly advisory services to help you strategically plan your CV/ML capabilities, reduce the trial-and-error of model development, and get to market faster.</p>]]>
      </content:encoded>
      <pubDate>Mon, 20 Feb 2023 06:00:00 -0500</pubDate>
      <author>Heather D. Couture</author>
      <enclosure url="https://2.gum.fm/op3.dev/e/pdcn.co/e/pdst.fm/e/dts.podtrac.com/redirect.mp3/media.transistor.fm/295de247/418680fa.mp3" length="33338538" type="audio/mpeg"/>
      <itunes:author>Heather D. Couture</itunes:author>
      <itunes:image href="https://img.transistor.fm/_rrwB0wJR2uJYe_2cYMdVgCNmlhGT35h0FS6AANz5QI/rs:fill:0:0:1/w:1400/h:1400/q:60/mb:500000/aHR0cHM6Ly9pbWct/dXBsb2FkLXByb2R1/Y3Rpb24udHJhbnNp/c3Rvci5mbS9lcGlz/b2RlLzEyMDk4Mjgv/MTY3NjU5NTM3NC1h/cnR3b3JrLmpwZw.jpg"/>
      <itunes:duration>1387</itunes:duration>
      <itunes:summary>
        <![CDATA[<p>Creating a lasting impact on a large scale requires collaboration, and globally reducing emissions is one of the most impactful, large-scale goals one could have at this point in time.</p><p>My guest on today’s show is Gavin McCormick, an economist by training who has immersed himself in the world of machine learning through the founding of WattTime, a company fighting climate change by automating devices to optimize energy in real-time in order to lower their carbon footprints. Currently, there are one billion devices making use of WattTime's technology, and in the next few years, Gavin hopes to increase that number to 30 billion!</p><p>Tune in today to hear about the uncommon way that Gavin and his team approach their goals, the Climate TRACE project that has come about as a result of WattTime’s success, and why they don’t have any competitors! </p><p><br></p><p><strong>Key Points:</strong></p><ul><li>Gavin’s educational background.</li><li>The discovery that led Gavin to found WattTime.</li><li>An explanation of what WattTime does.</li><li>The motivation behind the founding of Climate TRACE.</li><li>The role that machine learning plays at WattTime. </li><li>The single objective function of WattTime.</li><li>Methods that Gavin and his team use to ensure their actions are having the greatest impact.</li><li>How the WattTime approach differs from the approach utilized by many other machine learning organizations.</li><li>The importance of interdisciplinary collaboration.</li><li>How WattTime measures their impact.</li><li>One of WattTime’s major weaknesses.</li><li>Advice for leaders of AI-powered organizations.</li><li>What Gavin hopes WattTime will achieve in the next three to five years.  
</li></ul><p><br></p><p><strong>Quotes:</strong></p><p>“Renewable energy could have more impact if it could be sited in just the right locations and run at just the right times.” — Gavin McCormick</p><p><br></p><p>“You can really substantially reduce the carbon footprint of a power grid by shifting all of the load to moments when there's surplus clean energy instead of just random times.” — Gavin McCormick</p><p><br></p><p>“In any case where another organization, be they non-profit, university, for-profit, whatever, is rowing in a direction that is consistent with our mission and frankly doing it well, we would rather not spend the time and resources trying to recreate them or beat them. We would rather help them out.” — Gavin McCormick</p><p><br></p><p>“If you're really serious about impact, then someone else's success is not a threat to you. It's a benefit.” — Gavin McCormick</p><p><br></p><p><strong>Links Mentioned in Today’s Episode:</strong></p><p><a href="https://www.linkedin.com/in/gavinmccormick/">Gavin McCormick on LinkedIn</a></p><p><a href="https://www.watttime.org/">WattTime</a></p><p><a href="https://climatetrace.org/">Climate TRACE</a></p><p><strong>Resources for Computer Vision Teams:</strong></p><p><a href="https://www.linkedin.com/in/hdcouture/">LinkedIn</a> – Connect with Heather.</p><p><a href="https://pixelscientia.com/newsletter/">Computer Vision Insights Newsletter</a> – A biweekly newsletter to help bring the latest machine learning and computer vision research to applications in people and planetary health.</p><p><a href="http://pixelscientia.com/services/strategy-session/">Computer Vision Strategy Session</a> – Not sure how to advance your computer vision project? Get unstuck with a clear set of next steps. 
Schedule a 1 hour strategy session now to advance your project.</p><p><a href="https://pixelscientia.com/services/advisory/">Computer Vision Advisory Services</a> – Monthly advisory services to help you strategically plan your CV/ML capabilities, reduce the trial-and-error of model development, and get to market faster.</p>]]>
      </itunes:summary>
      <itunes:keywords>AI, startups, business, machine learning, computer vision, deep learning, impact, sustainability, healthcare, medical</itunes:keywords>
      <itunes:explicit>No</itunes:explicit>
      <podcast:transcript url="https://share.transistor.fm/s/295de247/transcript.txt" type="text/plain"/>
    </item>
    <item>
      <title>Making Cancer Treatments Affordable with Laura Kleiman from Reboot Rx</title>
      <itunes:episode>19</itunes:episode>
      <podcast:episode>19</podcast:episode>
      <itunes:title>Making Cancer Treatments Affordable with Laura Kleiman from Reboot Rx</itunes:title>
      <itunes:episodeType>full</itunes:episodeType>
      <guid isPermaLink="false">f548faf9-f664-45da-816c-47635221b85c</guid>
      <link>https://pixelscientia.com/podcast/making-cancer-treatments-affordable-with-laura-kleiman-from-reboot-rx/</link>
      <description>
        <![CDATA[<p>Giving generic drugs a new life in oncology is a game-changing strategy for developing new and affordable treatment options for cancer patients. But it would take years to review the thousands upon thousands of published research studies on non-cancer drugs tested as cancer treatments to identify the most promising candidates. Luckily, nonprofit health tech startup Reboot Rx is stepping in to solve this problem! </p><p>I spoke to Founder and CEO, Laura Kleiman about how her company is fast-tracking the development of affordable cancer treatments using AI technology. Working with a team of biomedical and clinical scientists, Reboot Rx uses machine learning and natural language processing to analyze large volumes of scientific literature, identify the most viable drugs, and develop pathways to generate definitive evidence and change the standard of care so that patients can benefit from them. </p><p>In this episode, you’ll learn more about Reboot Rx’s multi-pronged approach and the challenges that come with processing such large volumes of data. 
Plus Laura shares her advice for tech leaders looking to solve problems that have a meaningful societal impact.</p><p><br></p><p><strong>Key Points:</strong></p><ul><li>A look at Laura’s background and the personal story of what led her to create Reboot Rx.</li><li>The important work Reboot Rx does to repurpose generic drugs for the treatment of cancer.</li><li>An example that illustrates the role that machine learning plays in this process.</li><li>Challenges that come with analyzing this type of data.</li><li>How Reboot Rx’s machine learning developers collaborate with healthcare experts to ensure that their models are effective.</li><li>Advice for leaders of AI-powered startups and nonprofits: choose problems that matter!</li><li>How Reboot Rx is working to make their AI technology scalable.</li></ul><p><br></p><p><strong>Quotes:</strong></p><p>“Patients need both more effective but also more affordable treatment options. Reboot Rx is giving generic drugs a new life in oncology, taking drugs that are already available to treat other non-cancer indications and repurposing them for the treatment of cancer.”</p><p><br></p><p>“We use large language models to be able to process [large volumes] of scientific literature and predict which of these 600,000 studies are most likely to be relevant and extract key information about each of these studies.”</p><p><br></p><p>“There's so much opportunity for the use of AI and machine learning right now. 
I would encourage other leaders in the space to choose problems to solve that can have a meaningful societal impact.”</p><p><br></p><p><strong>Links:</strong></p><p><a href="https://rebootrx.org/">Reboot Rx</a></p><p><a href="https://www.linkedin.com/in/laurakleiman/">Laura Kleiman on LinkedIn</a></p><p><a href="https://twitter.com/LauraBKleiman">Laura Kleiman on Twitter</a></p><p><strong>Resources for Computer Vision Teams:</strong></p><p><a href="https://www.linkedin.com/in/hdcouture/">LinkedIn</a> – Connect with Heather.</p><p><a href="https://pixelscientia.com/newsletter/">Computer Vision Insights Newsletter</a> – A biweekly newsletter to help bring the latest machine learning and computer vision research to applications in people and planetary health.</p><p><a href="http://pixelscientia.com/services/strategy-session/">Computer Vision Strategy Session</a> – Not sure how to advance your computer vision project? Get unstuck with a clear set of next steps. Schedule a 1 hour strategy session now to advance your project.</p><p><a href="https://pixelscientia.com/services/advisory/">Computer Vision Advisory Services</a> – Monthly advisory services to help you strategically plan your CV/ML capabilities, reduce the trial-and-error of model development, and get to market faster.</p>]]>
      </description>
      <content:encoded>
        <![CDATA[<p>Giving generic drugs a new life in oncology is a game-changing strategy for developing new and affordable treatment options for cancer patients. But it would take years to review the thousands upon thousands of published research studies on non-cancer drugs tested as cancer treatments to identify the most promising candidates. Luckily, nonprofit health tech startup Reboot Rx is stepping in to solve this problem! </p><p>I spoke to Founder and CEO, Laura Kleiman about how her company is fast-tracking the development of affordable cancer treatments using AI technology. Working with a team of biomedical and clinical scientists, Reboot Rx uses machine learning and natural language processing to analyze large volumes of scientific literature, identify the most viable drugs, and develop pathways to generate definitive evidence and change the standard of care so that patients can benefit from them. </p><p>In this episode, you’ll learn more about Reboot Rx’s multi-pronged approach and the challenges that come with processing such large volumes of data. 
Plus Laura shares her advice for tech leaders looking to solve problems that have a meaningful societal impact.</p><p><br></p><p><strong>Key Points:</strong></p><ul><li>A look at Laura’s background and the personal story of what led her to create Reboot Rx.</li><li>The important work Reboot Rx does to repurpose generic drugs for the treatment of cancer.</li><li>An example that illustrates the role that machine learning plays in this process.</li><li>Challenges that come with analyzing this type of data.</li><li>How Reboot Rx’s machine learning developers collaborate with healthcare experts to ensure that their models are effective.</li><li>Advice for leaders of AI-powered startups and nonprofits: choose problems that matter!</li><li>How Reboot Rx is working to make their AI technology scalable.</li></ul><p><br></p><p><strong>Quotes:</strong></p><p>“Patients need both more effective but also more affordable treatment options. Reboot Rx is giving generic drugs a new life in oncology, taking drugs that are already available to treat other non-cancer indications and repurposing them for the treatment of cancer.”</p><p><br></p><p>“We use large language models to be able to process [large volumes] of scientific literature and predict which of these 600,000 studies are most likely to be relevant and extract key information about each of these studies.”</p><p><br></p><p>“There's so much opportunity for the use of AI and machine learning right now. 
I would encourage other leaders in the space to choose problems to solve that can have a meaningful societal impact.”</p><p><br></p><p><strong>Links:</strong></p><p><a href="https://rebootrx.org/">Reboot Rx</a></p><p><a href="https://www.linkedin.com/in/laurakleiman/">Laura Kleiman on LinkedIn</a></p><p><a href="https://twitter.com/LauraBKleiman">Laura Kleiman on Twitter</a></p><p><strong>Resources for Computer Vision Teams:</strong></p><p><a href="https://www.linkedin.com/in/hdcouture/">LinkedIn</a> – Connect with Heather.</p><p><a href="https://pixelscientia.com/newsletter/">Computer Vision Insights Newsletter</a> – A biweekly newsletter to help bring the latest machine learning and computer vision research to applications in people and planetary health.</p><p><a href="http://pixelscientia.com/services/strategy-session/">Computer Vision Strategy Session</a> – Not sure how to advance your computer vision project? Get unstuck with a clear set of next steps. Schedule a 1 hour strategy session now to advance your project.</p><p><a href="https://pixelscientia.com/services/advisory/">Computer Vision Advisory Services</a> – Monthly advisory services to help you strategically plan your CV/ML capabilities, reduce the trial-and-error of model development, and get to market faster.</p>]]>
      </content:encoded>
      <pubDate>Mon, 13 Feb 2023 06:00:00 -0500</pubDate>
      <author>Heather D. Couture</author>
      <enclosure url="https://2.gum.fm/op3.dev/e/pdcn.co/e/pdst.fm/e/dts.podtrac.com/redirect.mp3/media.transistor.fm/c80bb898/c7395d8c.mp3" length="13882409" type="audio/mpeg"/>
      <itunes:author>Heather D. Couture</itunes:author>
      <itunes:image href="https://img.transistor.fm/05PMNLsyEWxrYO2TVr8o3NKqT4uDDjWhUllU6Onwc-k/rs:fill:0:0:1/w:1400/h:1400/q:60/mb:500000/aHR0cHM6Ly9pbWct/dXBsb2FkLXByb2R1/Y3Rpb24udHJhbnNp/c3Rvci5mbS9lcGlz/b2RlLzEyMDEzNjUv/MTY3NjA0NzgzMS1h/cnR3b3JrLmpwZw.jpg"/>
      <itunes:duration>865</itunes:duration>
      <itunes:summary>
        <![CDATA[<p>Giving generic drugs a new life in oncology is a game-changing strategy for developing new and affordable treatment options for cancer patients. But it would take years to review the thousands upon thousands of published research studies on non-cancer drugs tested as cancer treatments to identify the most promising candidates. Luckily, nonprofit health tech startup Reboot Rx is stepping in to solve this problem! </p><p>I spoke to Founder and CEO, Laura Kleiman about how her company is fast-tracking the development of affordable cancer treatments using AI technology. Working with a team of biomedical and clinical scientists, Reboot Rx uses machine learning and natural language processing to analyze large volumes of scientific literature, identify the most viable drugs, and develop pathways to generate definitive evidence and change the standard of care so that patients can benefit from them. </p><p>In this episode, you’ll learn more about Reboot Rx’s multi-pronged approach and the challenges that come with processing such large volumes of data. 
Plus Laura shares her advice for tech leaders looking to solve problems that have a meaningful societal impact.</p><p><br></p><p><strong>Key Points:</strong></p><ul><li>A look at Laura’s background and the personal story of what led her to create Reboot Rx.</li><li>The important work Reboot Rx does to repurpose generic drugs for the treatment of cancer.</li><li>An example that illustrates the role that machine learning plays in this process.</li><li>Challenges that come with analyzing this type of data.</li><li>How Reboot Rx’s machine learning developers collaborate with healthcare experts to ensure that their models are effective.</li><li>Advice for leaders of AI-powered startups and nonprofits: choose problems that matter!</li><li>How Reboot Rx is working to make their AI technology scalable.</li></ul><p><br></p><p><strong>Quotes:</strong></p><p>“Patients need both more effective but also more affordable treatment options. Reboot Rx is giving generic drugs a new life in oncology, taking drugs that are already available to treat other non-cancer indications and repurposing them for the treatment of cancer.”</p><p><br></p><p>“We use large language models to be able to process [large volumes] of scientific literature and predict which of these 600,000 studies are most likely to be relevant and extract key information about each of these studies.”</p><p><br></p><p>“There's so much opportunity for the use of AI and machine learning right now. 
I would encourage other leaders in the space to choose problems to solve that can have a meaningful societal impact.”</p><p><br></p><p><strong>Links:</strong></p><p><a href="https://rebootrx.org/">Reboot Rx</a></p><p><a href="https://www.linkedin.com/in/laurakleiman/">Laura Kleiman on LinkedIn</a></p><p><a href="https://twitter.com/LauraBKleiman">Laura Kleiman on Twitter</a></p><p><strong>Resources for Computer Vision Teams:</strong></p><p><a href="https://www.linkedin.com/in/hdcouture/">LinkedIn</a> – Connect with Heather.</p><p><a href="https://pixelscientia.com/newsletter/">Computer Vision Insights Newsletter</a> – A biweekly newsletter to help bring the latest machine learning and computer vision research to applications in people and planetary health.</p><p><a href="http://pixelscientia.com/services/strategy-session/">Computer Vision Strategy Session</a> – Not sure how to advance your computer vision project? Get unstuck with a clear set of next steps. Schedule a 1 hour strategy session now to advance your project.</p><p><a href="https://pixelscientia.com/services/advisory/">Computer Vision Advisory Services</a> – Monthly advisory services to help you strategically plan your CV/ML capabilities, reduce the trial-and-error of model development, and get to market faster.</p>]]>
      </itunes:summary>
      <itunes:keywords>AI, startups, business, machine learning, computer vision, deep learning, impact, sustainability, healthcare, medical</itunes:keywords>
      <itunes:explicit>No</itunes:explicit>
      <podcast:transcript url="https://share.transistor.fm/s/c80bb898/transcript.txt" type="text/plain"/>
    </item>
    <item>
      <title>Improving Fish Farm Efficiency with Bryton Shang from Aquabyte</title>
      <itunes:episode>18</itunes:episode>
      <podcast:episode>18</podcast:episode>
      <itunes:title>Improving Fish Farm Efficiency with Bryton Shang from Aquabyte</itunes:title>
      <itunes:episodeType>full</itunes:episodeType>
      <guid isPermaLink="false">4b54bf16-a12a-4610-8364-28e044b28753</guid>
      <link>https://pixelscientia.com/podcast/improving-fish-farm-efficiency-with-bryton-shang-from-aquabyte/</link>
      <description>
        <![CDATA[<p>Today I'm joined by the Founder and CEO of Aquabyte, Bryton Shang, to discuss his mission to improve and enable fish farm efficiency and sustainability. Bryton fills us in on the role machine learning plays in monitoring underwater fish farm environments, the challenges of gathering and annotating data to build and train their models, and how their human-in-the-loop QA process converges to find solutions. Tune in to discover how Aquabyte’s mission-oriented, multidisciplinary, and multimodal nature impacts recruitment, and hear Bryton’s astute recruitment advice for leaders in the field. Aquabyte is a stellar example of an AI-powered startup looking to create a better, more sustainable future for the world at large.</p><p><br></p><p><strong>Key Points:</strong></p><ul><li>Bryton Shang’s background and what led him to create Aquabyte.</li><li>Aquabyte’s mission to enable efficient and sustainable fish farming.</li><li>The role machine learning plays in monitoring underwater fish farm environments.</li><li>How Aquabyte built their ML models.</li><li>The practical challenges of training their models and their solution-finding systems.</li><li>How Aquabyte’s mission-oriented, multidisciplinary, and multimodal nature impacts recruitment.</li><li>Bryton’s recruitment advice for other leaders of AI-powered startups.</li><li>His vision for Aquabyte’s impact in the next three to five years.</li></ul><p><br></p><p><strong>Quotes:</strong></p><p>“[At] Aquabyte, we're focused on how machine learning and computer vision can help fish farmers be more efficient and sustainable.”</p><p><br></p><p>“There’s definitely a mission-oriented aspect to [Aquabyte], which is attractive to a lot of folks that are looking for more of a mission-oriented bent.”</p><p><br></p><p>“AI is a broad label, and I think the business domain in which you apply AI and how you apply it is really important, ultimately, to the success.”</p><p><br></p><p>“By having autonomous fish farms, or 
even on land where you can have much more scalability of fish farming, then you can really increase the supply of fish.”</p><p><br></p><p><strong>Links:</strong></p><p><a href="https://aquabyte.ai/">Aquabyte</a></p><p><a href="https://www.linkedin.com/in/brytonshang/">Bryton Shang on LinkedIn</a></p><p><a href="https://www.youtube.com/watch?v=YZ_qJ5JFD3I&amp;ab_channel=AmazonWebServices">Now Go Build with Werner Vogels – S1E3 Bergen</a></p><p><strong>Resources for Computer Vision Teams:</strong></p><p><a href="https://www.linkedin.com/in/hdcouture/">LinkedIn</a> – Connect with Heather.</p><p><a href="https://pixelscientia.com/newsletter/">Computer Vision Insights Newsletter</a> – A biweekly newsletter to help bring the latest machine learning and computer vision research to applications in people and planetary health.</p><p><a href="http://pixelscientia.com/services/strategy-session/">Computer Vision Strategy Session</a> – Not sure how to advance your computer vision project? Get unstuck with a clear set of next steps. Schedule a 1 hour strategy session now to advance your project.</p><p><a href="https://pixelscientia.com/services/advisory/">Computer Vision Advisory Services</a> – Monthly advisory services to help you strategically plan your CV/ML capabilities, reduce the trial-and-error of model development, and get to market faster.</p>]]>
      </description>
      <content:encoded>
        <![CDATA[<p>Today I'm joined by the Founder and CEO of Aquabyte, Bryton Shang, to discuss his mission to improve and enable fish farm efficiency and sustainability. Bryton fills us in on the role machine learning plays in monitoring underwater fish farm environments, the challenges of gathering and annotating data to build and train their models, and how their human-in-the-loop QA process converges to find solutions. Tune in to discover how Aquabyte’s mission-oriented, multidisciplinary, and multimodal nature impacts recruitment, and hear Bryton’s astute recruitment advice for leaders in the field. Aquabyte is a stellar example of an AI-powered startup looking to create a better, more sustainable future for the world at large.</p><p><br></p><p><strong>Key Points:</strong></p><ul><li>Bryton Shang’s background and what led him to create Aquabyte.</li><li>Aquabyte’s mission to enable efficient and sustainable fish farming.</li><li>The role machine learning plays in monitoring underwater fish farm environments.</li><li>How Aquabyte built their ML models.</li><li>The practical challenges of training their models and their solution-finding systems.</li><li>How Aquabyte’s mission-oriented, multidisciplinary, and multimodal nature impacts recruitment.</li><li>Bryton’s recruitment advice for other leaders of AI-powered startups.</li><li>His vision for Aquabyte’s impact in the next three to five years.</li></ul><p><br></p><p><strong>Quotes:</strong></p><p>“[At] Aquabyte, we're focused on how machine learning and computer vision can help fish farmers be more efficient and sustainable.”</p><p><br></p><p>“There’s definitely a mission-oriented aspect to [Aquabyte], which is attractive to a lot of folks that are looking for more of a mission-oriented bent.”</p><p><br></p><p>“AI is a broad label, and I think the business domain in which you apply AI and how you apply it is really important, ultimately, to the success.”</p><p><br></p><p>“By having autonomous fish farms, or 
even on land where you can have much more scalability of fish farming, then you can really increase the supply of fish.”</p><p><br></p><p><strong>Links:</strong></p><p><a href="https://aquabyte.ai/">Aquabyte</a></p><p><a href="https://www.linkedin.com/in/brytonshang/">Bryton Shang on LinkedIn</a></p><p><a href="https://www.youtube.com/watch?v=YZ_qJ5JFD3I&amp;ab_channel=AmazonWebServices">Now Go Build with Werner Vogels – S1E3 Bergen</a></p><p><strong>Resources for Computer Vision Teams:</strong></p><p><a href="https://www.linkedin.com/in/hdcouture/">LinkedIn</a> – Connect with Heather.</p><p><a href="https://pixelscientia.com/newsletter/">Computer Vision Insights Newsletter</a> – A biweekly newsletter to help bring the latest machine learning and computer vision research to applications in people and planetary health.</p><p><a href="http://pixelscientia.com/services/strategy-session/">Computer Vision Strategy Session</a> – Not sure how to advance your computer vision project? Get unstuck with a clear set of next steps. Schedule a 1 hour strategy session now to advance your project.</p><p><a href="https://pixelscientia.com/services/advisory/">Computer Vision Advisory Services</a> – Monthly advisory services to help you strategically plan your CV/ML capabilities, reduce the trial-and-error of model development, and get to market faster.</p>]]>
      </content:encoded>
      <pubDate>Mon, 06 Feb 2023 06:00:00 -0500</pubDate>
      <author>Heather D. Couture</author>
      <enclosure url="https://2.gum.fm/op3.dev/e/pdcn.co/e/pdst.fm/e/dts.podtrac.com/redirect.mp3/media.transistor.fm/dee49f83/0f4704a3.mp3" length="20820333" type="audio/mpeg"/>
      <itunes:author>Heather D. Couture</itunes:author>
      <itunes:image href="https://img.transistor.fm/i3xsVhQt9ICOkaarGQazXxZl5XP2ALgqwNKACAkP1pI/rs:fill:0:0:1/w:1400/h:1400/q:60/mb:500000/aHR0cHM6Ly9pbWct/dXBsb2FkLXByb2R1/Y3Rpb24udHJhbnNp/c3Rvci5mbS9lcGlz/b2RlLzExOTE5MDkv/MTY3NTM3ODEyMC1h/cnR3b3JrLmpwZw.jpg"/>
      <itunes:duration>1297</itunes:duration>
      <itunes:summary>Today I'm joined by the Founder and CEO of Aquabyte, Bryton Shang, to discuss his mission to improve and enable fish farm efficiency and sustainability. Bryton fills us in on the role machine learning plays in monitoring underwater fish farm environments, the challenges of gathering and annotating data to build and train their models, and how their human-in-the-loop QA process converges to find solutions. Tune in to discover how Aquabyte’s mission-oriented, multidisciplinary, and multimodal nature impacts recruitment, and hear Bryton’s astute recruitment advice for leaders in the field. Aquabyte is a stellar example of an AI-powered startup looking to create a better, more sustainable future for the world at large.</itunes:summary>
      <itunes:subtitle>Today I'm joined by the Founder and CEO of Aquabyte, Bryton Shang, to discuss his mission to improve and enable fish farm efficiency and sustainability. Bryton fills us in on the role machine learning plays in monitoring underwater fish farm environments,</itunes:subtitle>
      <itunes:keywords>AI, startups, business, machine learning, computer vision, deep learning, impact, sustainability, healthcare, medical</itunes:keywords>
      <itunes:explicit>No</itunes:explicit>
      <podcast:transcript url="https://share.transistor.fm/s/dee49f83/transcript.txt" type="text/plain"/>
    </item>
    <item>
      <title>Detecting Breast Cancer Earlier with Tobias Rijken from Kheiron Medical</title>
      <itunes:episode>17</itunes:episode>
      <podcast:episode>17</podcast:episode>
      <itunes:title>Detecting Breast Cancer Earlier with Tobias Rijken from Kheiron Medical</itunes:title>
      <itunes:episodeType>full</itunes:episodeType>
      <guid isPermaLink="false">350338cb-2834-4c9b-ba90-5709b8ab8e2b</guid>
      <link>https://pixelscientia.com/podcast/detecting-breast-cancer-earlier-with-tobias-rijken-from-kheiron-medical/</link>
      <description>
        <![CDATA[<p>All women face the risk of breast cancer, but early detection can greatly increase the chances of a positive outcome and reduce the need for aggressive treatment options. In this episode, I talk with Tobias Rijken, CTO and co-founder of Kheiron Medical Technologies, about leveraging AI for detecting breast cancer. We discuss the role of AI in improving medical care, the power of vertical integration and feedback loops, and what makes Kheiron different from other AI startups. Hear about the challenges of acquiring reliable data, whether using generative models is beneficial, details about the products Kheiron has created, and much more!</p><p><br></p><p><strong>Key Points:</strong></p><ul><li>Tobias's professional background and why he created Kheiron Medical Technologies.</li><li>Learn about the amazing work Kheiron Medical Technologies does and why it is important.</li><li>Overview of why detecting breast cancer early is so vital and the challenges of screening.</li><li>How AI can help resolve the current challenges in cancer screening.</li><li>He explains the machine learning process and training the model used.</li><li>The complications encountered in working with radiology images.</li><li>Find out why image quality is key to the machine learning process.</li><li>How he is able to account for the variation of technology and methods used.</li><li>Outline of the regulatory process and how it impacts machine learning model development.</li><li>Hear advice Tobias has for other leaders of AI-powered startups.</li><li>Details about how Tobias approaches improving the models over time.</li><li>Tobias tells us what Kheiron Medical Technologies has planned for the future.</li></ul><p><br></p><p><strong>Quotes:</strong></p><p>“What I liked so much about machine learning is the ability it has to solve real-world problems. 
And in my opinion, real-world machine learning is very different from academic machine learning.”</p><p><br></p><p>“Either the right information isn't available, or it is inaccurate, or there's missing information. We see AI as a tool to help address those information problems.”</p><p><br></p><p>“The challenge when you sample uniformly from your whole dataset is that there will be cases you've sampled, where you may not have ground truth.”</p><p><br></p><p>“For me, when I started this company, this was not about building a great model that has a great performance on a test dataset. This is about getting AI into the real world.”</p><p><br></p><p><strong>Links:</strong></p><p><a href="https://www.linkedin.com/in/tobiasmr/">Tobias Rijken on LinkedIn</a></p><p><a href="https://twitter.com/tobiasrijken">Tobias Rijken on Twitter</a></p><p><a href="https://www.kheironmed.com">Kheiron Medical Technologies</a></p><p><strong>Resources for Computer Vision Teams:</strong></p><p><a href="https://www.linkedin.com/in/hdcouture/">LinkedIn</a> – Connect with Heather.</p><p><a href="https://pixelscientia.com/newsletter/">Computer Vision Insights Newsletter</a> – A biweekly newsletter to help bring the latest machine learning and computer vision research to applications in people and planetary health.</p><p><a href="http://pixelscientia.com/services/strategy-session/">Computer Vision Strategy Session</a> – Not sure how to advance your computer vision project? Get unstuck with a clear set of next steps. Schedule a 1 hour strategy session now to advance your project.</p><p><a href="https://pixelscientia.com/services/advisory/">Computer Vision Advisory Services</a> – Monthly advisory services to help you strategically plan your CV/ML capabilities, reduce the trial-and-error of model development, and get to market faster.</p>]]>
      </description>
      <content:encoded>
        <![CDATA[<p>All women face the risk of breast cancer, but early detection can greatly increase the chances of a positive outcome and reduce the need for aggressive treatment options. In this episode, I talk with Tobias Rijken, CTO and co-founder of Kheiron Medical Technologies, about leveraging AI for detecting breast cancer. We discuss the role of AI in improving medical care, the power of vertical integration and feedback loops, and what makes Kheiron different from other AI startups. Hear about the challenges of acquiring reliable data, whether using generative models is beneficial, details about the products Kheiron has created, and much more!</p><p><br></p><p><strong>Key Points:</strong></p><ul><li>Tobias's professional background and why he created Kheiron Medical Technologies.</li><li>Learn about the amazing work Kheiron Medical Technologies does and why it is important.</li><li>Overview of why detecting breast cancer early is so vital and the challenges of screening.</li><li>How AI can help resolve the current challenges in cancer screening.</li><li>He explains the machine learning process and training the model used.</li><li>The complications encountered in working with radiology images.</li><li>Find out why image quality is key to the machine learning process.</li><li>How he is able to account for the variation of technology and methods used.</li><li>Outline of the regulatory process and how it impacts machine learning model development.</li><li>Hear advice Tobias has for other leaders of AI-powered startups.</li><li>Details about how Tobias approaches improving the models over time.</li><li>Tobias tells us what Kheiron Medical Technologies has planned for the future.</li></ul><p><br></p><p><strong>Quotes:</strong></p><p>“What I liked so much about machine learning is the ability it has to solve real-world problems. 
And in my opinion, real-world machine learning is very different from academic machine learning.”</p><p><br></p><p>“Either the right information isn't available, or it is inaccurate, or there's missing information. We see AI as a tool to help address those information problems.”</p><p><br></p><p>“The challenge when you sample uniformly from your whole dataset is that there will be cases you've sampled, where you may not have ground truth.”</p><p><br></p><p>“For me, when I started this company, this was not about building a great model that has a great performance on a test dataset. This is about getting AI into the real world.”</p><p><br></p><p><strong>Links:</strong></p><p><a href="https://www.linkedin.com/in/tobiasmr/">Tobias Rijken on LinkedIn</a></p><p><a href="https://twitter.com/tobiasrijken">Tobias Rijken on Twitter</a></p><p><a href="https://www.kheironmed.com">Kheiron Medical Technologies</a></p><p><strong>Resources for Computer Vision Teams:</strong></p><p><a href="https://www.linkedin.com/in/hdcouture/">LinkedIn</a> – Connect with Heather.</p><p><a href="https://pixelscientia.com/newsletter/">Computer Vision Insights Newsletter</a> – A biweekly newsletter to help bring the latest machine learning and computer vision research to applications in people and planetary health.</p><p><a href="http://pixelscientia.com/services/strategy-session/">Computer Vision Strategy Session</a> – Not sure how to advance your computer vision project? Get unstuck with a clear set of next steps. Schedule a 1 hour strategy session now to advance your project.</p><p><a href="https://pixelscientia.com/services/advisory/">Computer Vision Advisory Services</a> – Monthly advisory services to help you strategically plan your CV/ML capabilities, reduce the trial-and-error of model development, and get to market faster.</p>]]>
      </content:encoded>
      <pubDate>Mon, 30 Jan 2023 06:00:00 -0500</pubDate>
      <author>Heather D. Couture</author>
      <enclosure url="https://2.gum.fm/op3.dev/e/pdcn.co/e/pdst.fm/e/dts.podtrac.com/redirect.mp3/media.transistor.fm/2b7c856e/926f4a1c.mp3" length="26872282" type="audio/mpeg"/>
      <itunes:author>Heather D. Couture</itunes:author>
      <itunes:image href="https://img.transistor.fm/p-6FExRIWUg6Zxx3dDWcVPTcXtkkqsY9FSyBBcRRxxw/rs:fill:0:0:1/w:1400/h:1400/q:60/mb:500000/aHR0cHM6Ly9pbWct/dXBsb2FkLXByb2R1/Y3Rpb24udHJhbnNp/c3Rvci5mbS9lcGlz/b2RlLzExODM1MDYv/MTY3NDkxNTIzOS1h/cnR3b3JrLmpwZw.jpg"/>
      <itunes:duration>1675</itunes:duration>
      <itunes:summary>All women face the risk of breast cancer, but early detection can greatly increase the chances of a positive outcome and reduce the need for aggressive treatment options. In this episode, I talk with Tobias Rijken, CTO and co-founder of Kheiron Medical Technologies, about leveraging AI for detecting breast cancer. We discuss the role of AI in improving medical care, the power of vertical integration and feedback loops, and what makes Kheiron different from other AI startups. Hear about the challenges of acquiring reliable data, whether using generative models is beneficial, details about the products Kheiron has created, and much more!</itunes:summary>
      <itunes:subtitle>All women face the risk of breast cancer, but early detection can greatly increase the chances of a positive outcome and reduce the need for aggressive treatment options. In this episode, I talk with Tobias Rijken, CTO and co-founder of Kheiron Medical Te</itunes:subtitle>
      <itunes:keywords>AI, startups, business, machine learning, computer vision, deep learning, impact, sustainability, healthcare, medical</itunes:keywords>
      <itunes:explicit>No</itunes:explicit>
      <podcast:transcript url="https://share.transistor.fm/s/2b7c856e/transcript.txt" type="text/plain"/>
    </item>
    <item>
      <title>Monitoring Fields for Precision Agriculture with Gershom Kutliroff from Taranis</title>
      <itunes:episode>16</itunes:episode>
      <podcast:episode>16</podcast:episode>
      <itunes:title>Monitoring Fields for Precision Agriculture with Gershom Kutliroff from Taranis</itunes:title>
      <itunes:episodeType>full</itunes:episodeType>
      <guid isPermaLink="false">5055d391-4a3f-4068-87da-410585e113b3</guid>
      <link>https://pixelscientia.com/podcast/monitoring-fields-for-precision-agriculture-with-gershom-kutliroff-from-taranis/</link>
      <description>
        <![CDATA[<p>In this episode, I talk with Gershom Kutliroff, CTO of Taranis, about precision agriculture. Taranis uses computer vision to monitor fields, providing critical insights to growers. Gershom and I talked about how they gather and annotate data, the challenges they encounter in working with aerial imagery, and how they validate their models and accommodate data drift with continuous learning.</p><p><br></p><p><strong>Quotes</strong>:</p><p><em>“Taranis is using drone technology to capture imagery and then use AI to process that imagery to understand what's happening in grower's field.”</em></p><p><br></p><p><em>“It becomes increasingly difficult to maintain consistent quality levels if you're working with tens or even hundreds of annotators. But when you have AI models, then you have the ability to control the quality of the insights that you're generating.”</em></p><p><br></p><p><em>“There's a lot of discussion in the last few years in the AI space about data-centric versus model-centric. Model centric would be the case where in your development you invest a lot in  choosing the right architecture that optimizes your performance, gives you the best results for your models, or spending a lot of time with hyper parameters and that type of work. And data-centric is you spend a lot more time making sure that your data set is clean, that you've got that it's balanced, you've got the right amount of classes for the problem that you're trying to solve.”</em></p><p><br></p><p><em>“We struggle with the problem of long tail distributions. If I take diseases as an example, there are some diseases that can cause a lot of damage to the crops. But they're very rare in terms of how often they actually occur in grower's fields.”</em></p><p><br></p><p><em>“Because we're running our own operations and so we're flying our own drones, we've also  invested in the software that's running on the drones when we're flying. 
So the images the drone pilot captures in the field are validated in the field. We have algorithms running on the edge to be able to check the quality of those images. And then if the images are not the quality that we expect them to get, the pilot knows while he's still there at the field and he can fly again.”</em></p><p><br></p><p><em>“For a lot of the models that we use you really need domain experts. You really need trained agronomists who can look at these images.”</em></p><p><br></p><p><em>“A certain percentage of all of the missions that we've flown are sent for review by our in-house agronomists before we release them to customers. So that's a really critical piece of how we do validation, and that also gives us a high level of confidence internally that the product that we're releasing to our customers stands by the quality that we expect it to.”</em></p><p><br></p><p><em>“We do suffer from this type of data drift where the data that we're seeing in production is not exactly in the same distribution as the data that we used to train. So the most effective technique that we've seen is to implement some kind of a continuous learning type of framework whereby we are able to take data that we're capturing in production, so when we're actually live   and servicing our customers' fields. And then the data that doesn't have a good correspondence with the distribution of the training data that was used for the models, we can then filter that data out. 
We can extract that data and use it to quickly retrain the models, to adapt the models, and then deploy those models back into production.”</em></p><p><br></p><p><em>“The company started by offering a product based on manual tagging, which didn't have any AI technology at the beginning, which allows it to offer products and service customers and start building this very rich database that we leverage now.”</em></p><p><br></p><p><strong>Links</strong>:</p><p><a href="https://taranis.com/">Taranis</a></p><p><a href="https://www.linkedin.com/in/gershom-kutliroff-9a89611/">Gershom Kutliroff</a></p><p><strong>Resources for Computer Vision Teams:</strong></p><p><a href="https://www.linkedin.com/in/hdcouture/">LinkedIn</a> – Connect with Heather.</p><p><a href="https://pixelscientia.com/newsletter/">Computer Vision Insights Newsletter</a> – A biweekly newsletter to help bring the latest machine learning and computer vision research to applications in people and planetary health.</p><p><a href="http://pixelscientia.com/services/strategy-session/">Computer Vision Strategy Session</a> – Not sure how to advance your computer vision project? Get unstuck with a clear set of next steps. Schedule a 1 hour strategy session now to advance your project.</p><p><a href="https://pixelscientia.com/services/advisory/">Computer Vision Advisory Services</a> – Monthly advisory services to help you strategically plan your CV/ML capabilities, reduce the trial-and-error of model development, and get to market faster.</p>]]>
      </description>
      <content:encoded>
        <![CDATA[<p>In this episode, I talk with Gershom Kutliroff, CTO of Taranis, about precision agriculture. Taranis uses computer vision to monitor fields, providing critical insights to growers. Gershom and I talked about how they gather and annotate data, the challenges they encounter in working with aerial imagery, and how they validate their models and accommodate data drift with continuous learning.</p><p><br></p><p><strong>Quotes</strong>:</p><p><em>“Taranis is using drone technology to capture imagery and then use AI to process that imagery to understand what's happening in grower's field.”</em></p><p><br></p><p><em>“It becomes increasingly difficult to maintain consistent quality levels if you're working with tens or even hundreds of annotators. But when you have AI models, then you have the ability to control the quality of the insights that you're generating.”</em></p><p><br></p><p><em>“There's a lot of discussion in the last few years in the AI space about data-centric versus model-centric. Model centric would be the case where in your development you invest a lot in  choosing the right architecture that optimizes your performance, gives you the best results for your models, or spending a lot of time with hyper parameters and that type of work. And data-centric is you spend a lot more time making sure that your data set is clean, that you've got that it's balanced, you've got the right amount of classes for the problem that you're trying to solve.”</em></p><p><br></p><p><em>“We struggle with the problem of long tail distributions. If I take diseases as an example, there are some diseases that can cause a lot of damage to the crops. But they're very rare in terms of how often they actually occur in grower's fields.”</em></p><p><br></p><p><em>“Because we're running our own operations and so we're flying our own drones, we've also  invested in the software that's running on the drones when we're flying. 
So the images the drone pilot captures in the field are validated in the field. We have algorithms running on the edge to be able to check the quality of those images. And then if the images are not the quality that we expect them to get, the pilot knows while he's still there at the field and he can fly again.”</em></p><p><br></p><p><em>“For a lot of the models that we use you really need domain experts. You really need trained agronomists who can look at these images.”</em></p><p><br></p><p><em>“A certain percentage of all of the missions that we've flown are sent for review by our in-house agronomists before we release them to customers. So that's a really critical piece of how we do validation, and that also gives us a high level of confidence internally that the product that we're releasing to our customers stands by the quality that we expect it to.”</em></p><p><br></p><p><em>“We do suffer from this type of data drift where the data that we're seeing in production is not exactly in the same distribution as the data that we used to train. So the most effective technique that we've seen is to implement some kind of a continuous learning type of framework whereby we are able to take data that we're capturing in production, so when we're actually live   and servicing our customers' fields. And then the data that doesn't have a good correspondence with the distribution of the training data that was used for the models, we can then filter that data out. 
We can extract that data and use it to quickly retrain the models, to adapt the models, and then deploy those models back into production.”</em></p><p><br></p><p><em>“The company started by offering a product based on manual tagging, which didn't have any AI technology at the beginning, which allows it to offer products and service customers and start building this very rich database that we leverage now.”</em></p><p><br></p><p><strong>Links</strong>:</p><p><a href="https://taranis.com/">Taranis</a></p><p><a href="https://www.linkedin.com/in/gershom-kutliroff-9a89611/">Gershom Kutliroff</a></p><p><strong>Resources for Computer Vision Teams:</strong></p><p><a href="https://www.linkedin.com/in/hdcouture/">LinkedIn</a> – Connect with Heather.</p><p><a href="https://pixelscientia.com/newsletter/">Computer Vision Insights Newsletter</a> – A biweekly newsletter to help bring the latest machine learning and computer vision research to applications in people and planetary health.</p><p><a href="http://pixelscientia.com/services/strategy-session/">Computer Vision Strategy Session</a> – Not sure how to advance your computer vision project? Get unstuck with a clear set of next steps. Schedule a 1 hour strategy session now to advance your project.</p><p><a href="https://pixelscientia.com/services/advisory/">Computer Vision Advisory Services</a> – Monthly advisory services to help you strategically plan your CV/ML capabilities, reduce the trial-and-error of model development, and get to market faster.</p>]]>
      </content:encoded>
      <pubDate>Mon, 23 Jan 2023 06:00:00 -0500</pubDate>
      <author>Heather D. Couture</author>
      <enclosure url="https://2.gum.fm/op3.dev/e/pdcn.co/e/pdst.fm/e/dts.podtrac.com/redirect.mp3/media.transistor.fm/00153741/cf1a6265.mp3" length="33301309" type="audio/mpeg"/>
      <itunes:author>Heather D. Couture</itunes:author>
      <itunes:image href="https://img.transistor.fm/dQIfk3iRUuVPXV7pD8Gp2leLVl3nK6_MapU5JDmCrRI/rs:fill:0:0:1/w:1400/h:1400/q:60/mb:500000/aHR0cHM6Ly9pbWct/dXBsb2FkLXByb2R1/Y3Rpb24udHJhbnNp/c3Rvci5mbS9lcGlz/b2RlLzExNzQwMzAv/MTY3NDI2Njk0MS1h/cnR3b3JrLmpwZw.jpg"/>
      <itunes:duration>2079</itunes:duration>
      <itunes:summary>In this episode, I talk with Gershom Kutliroff, CTO of Taranis, about precision agriculture. Taranis uses computer vision to monitor fields, providing critical insights to growers. Gershom and I talked about how they gather and annotate data, the challenges they encounter in working with aerial imagery, and how they validate their models and accommodate data drift with continuous learning.</itunes:summary>
      <itunes:subtitle>In this episode, I talk with Gershom Kutliroff, CTO of Taranis, about precision agriculture. Taranis uses computer vision to monitor fields, providing critical insights to growers. Gershom and I talked about how they gather and annotate data, the challeng</itunes:subtitle>
      <itunes:keywords>AI, startups, business, machine learning, computer vision, deep learning, impact, sustainability, healthcare, medical</itunes:keywords>
      <itunes:explicit>No</itunes:explicit>
      <podcast:transcript url="https://share.transistor.fm/s/00153741/transcript.vtt" type="text/vtt" rel="captions"/>
    </item>
    <item>
      <title>Improving Patient Outcomes with Vinod Subramanian from Syapse</title>
      <itunes:episode>15</itunes:episode>
      <podcast:episode>15</podcast:episode>
      <itunes:title>Improving Patient Outcomes with Vinod Subramanian from Syapse</itunes:title>
      <itunes:episodeType>full</itunes:episodeType>
      <guid isPermaLink="false">d1ad75ac-6c24-4b1b-8f8e-7e99881fb106</guid>
      <link>https://pixelscientia.com/podcast/improving-patient-outcomes-with-vinod-subramanian-from-syapse/</link>
      <description>
        <![CDATA[<p>In this episode, I talk with Vinod Subramanian, Chief Data and Product Development Officer at Syapse, about machine learning for healthcare and advancements in cancer treatment. Syapse is a real world evidence company dedicated to improving outcomes for cancer and other serious diseases. Vinod and I talked about the types of healthcare data they work with, the data challenges they encounter, how they validate their models, and how they mitigate bias.</p><p><strong>QUOTES:<br></strong><em>"Technology is not the answer exemplified of the intent. And the fundamental question, I think, that all of us are confronted by: what is the intent and what in the world that we want to try to help shape?"</em></p><p>"There are infinite possibilities in the terms of patient care with aggregated and harmonized data in healthcare. We all know about the point that data in general is fragmented and decentralized in the industry. Real world data comes from knowledge and knowledge comes from collecting information and of course, information stems from aggregating disparate data."</p><p>"Machine learning today, especially in a life science setting, is leveraged as new ways right to garner new biological insights."</p><p>"One of the things that we are also doing is not just about adopting and using (ML and NLP), we strongly believe that we want to share our work. And that would not only raise and mainstream the work of everybody doing it, but also it'll help us in adopting and applying in precision medicine through standards."</p><p>"Now not all data is needed equal. When we can improve the way data is collected, connected, analyzed, and consumed, we can not only improve the lives of our community, but it also gives us a way to look at the care continuum very differently."</p><p>"There's no guarantee when you get into an initiative which uses machine learning and AI, because it cannot be successful. 
It has to be a learning experience, but it, there's no guarantee that it will be  successful. And there needs to be willingness and appetite to experiment, learn, and iterate, and taking a Socratic approach, and accelerate the journey towards success, anchor down the culture."</p><p><strong>LINKS:<br></strong><a href="https://syapse.com/">Syapse</a><br><a href="https://www.linkedin.com/in/vinod-subramanian-34a3ba/">Vinod Subramanian</a></p><p><strong>Resources for Computer Vision Teams:</strong></p><p><a href="https://www.linkedin.com/in/hdcouture/">LinkedIn</a> – Connect with Heather.</p><p><a href="https://pixelscientia.com/newsletter/">Computer Vision Insights Newsletter</a> – A biweekly newsletter to help bring the latest machine learning and computer vision research to applications in people and planetary health.</p><p><a href="http://pixelscientia.com/services/strategy-session/">Computer Vision Strategy Session</a> – Not sure how to advance your computer vision project? Get unstuck with a clear set of next steps. Schedule a 1 hour strategy session now to advance your project.</p><p><a href="https://pixelscientia.com/services/advisory/">Computer Vision Advisory Services</a> – Monthly advisory services to help you strategically plan your CV/ML capabilities, reduce the trial-and-error of model development, and get to market faster.</p>]]>
      </description>
      <content:encoded>
        <![CDATA[<p>In this episode, I talk with Vinod Subramanian, Chief Data and Product Development Officer at Syapse, about machine learning for healthcare and advancements in cancer treatment. Syapse is a real world evidence company dedicated to improving outcomes for cancer and other serious diseases. Vinod and I talked about the types of healthcare data they work with, the data challenges they encounter, how they validate their models, and how they mitigate bias.</p><p><strong>QUOTES:<br></strong><em>"Technology is not the answer exemplified of the intent. And the fundamental question, I think, that all of us are confronted by: what is the intent and what in the world that we want to try to help shape?"</em></p><p>"There are infinite possibilities in the terms of patient care with aggregated and harmonized data in healthcare. We all know about the point that data in general is fragmented and decentralized in the industry. Real world data comes from knowledge and knowledge comes from collecting information and of course, information stems from aggregating disparate data."</p><p>"Machine learning today, especially in a life science setting, is leveraged as new ways right to garner new biological insights."</p><p>"One of the things that we are also doing is not just about adopting and using (ML and NLP), we strongly believe that we want to share our work. And that would not only raise and mainstream the work of everybody doing it, but also it'll help us in adopting and applying in precision medicine through standards."</p><p>"Now not all data is needed equal. When we can improve the way data is collected, connected, analyzed, and consumed, we can not only improve the lives of our community, but it also gives us a way to look at the care continuum very differently."</p><p>"There's no guarantee when you get into an initiative which uses machine learning and AI, because it cannot be successful. 
It has to be a learning experience, but it, there's no guarantee that it will be  successful. And there needs to be willingness and appetite to experiment, learn, and iterate, and taking a Socratic approach, and accelerate the journey towards success, anchor down the culture."</p><p><strong>LINKS:<br></strong><a href="https://syapse.com/">Syapse</a><br><a href="https://www.linkedin.com/in/vinod-subramanian-34a3ba/">Vinod Subramanian</a></p><p><strong>Resources for Computer Vision Teams:</strong></p><p><a href="https://www.linkedin.com/in/hdcouture/">LinkedIn</a> – Connect with Heather.</p><p><a href="https://pixelscientia.com/newsletter/">Computer Vision Insights Newsletter</a> – A biweekly newsletter to help bring the latest machine learning and computer vision research to applications in people and planetary health.</p><p><a href="http://pixelscientia.com/services/strategy-session/">Computer Vision Strategy Session</a> – Not sure how to advance your computer vision project? Get unstuck with a clear set of next steps. Schedule a 1 hour strategy session now to advance your project.</p><p><a href="https://pixelscientia.com/services/advisory/">Computer Vision Advisory Services</a> – Monthly advisory services to help you strategically plan your CV/ML capabilities, reduce the trial-and-error of model development, and get to market faster.</p>]]>
      </content:encoded>
      <pubDate>Mon, 16 Jan 2023 06:00:00 -0500</pubDate>
      <author>Heather D. Couture</author>
      <enclosure url="https://2.gum.fm/op3.dev/e/pdcn.co/e/pdst.fm/e/dts.podtrac.com/redirect.mp3/media.transistor.fm/033e5b82/654f2b58.mp3" length="34461111" type="audio/mpeg"/>
      <itunes:author>Heather D. Couture</itunes:author>
      <itunes:image href="https://img.transistor.fm/kLIwT0Rz9H-i9YyaNM0-J-nNkf29pPOrSyL1SRWXNzE/rs:fill:0:0:1/w:1400/h:1400/q:60/mb:500000/aHR0cHM6Ly9pbWct/dXBsb2FkLXByb2R1/Y3Rpb24udHJhbnNp/c3Rvci5mbS9lcGlz/b2RlLzExNjc2NTIv/MTY3MzgwNDQzMC1h/cnR3b3JrLmpwZw.jpg"/>
      <itunes:duration>2152</itunes:duration>
      <itunes:summary>In this episode, I talk with Vinod Subramanian, Chief Data and Product Development Officer at Syapse, about machine learning for healthcare and advancements in cancer treatment. Syapse is a real world evidence company dedicated to improving outcomes for cancer and other serious diseases. Vinod and I talked about the types of healthcare data they work with, the data challenges they encounter, how they validate their models, and how they mitigate bias.</itunes:summary>
      <itunes:subtitle>In this episode, I talk with Vinod Subramanian, Chief Data and Product Development Officer at Syapse, about machine learning for healthcare and advancements in cancer treatment. Syapse is a real world evidence company dedicated to improving outcomes for c</itunes:subtitle>
      <itunes:keywords>AI, startups, business, machine learning, computer vision, deep learning, impact, sustainability, healthcare, medical</itunes:keywords>
      <itunes:explicit>No</itunes:explicit>
    </item>
    <item>
      <title>Ecological Restoration with Patrick Leung from Earthshot Labs</title>
      <itunes:episode>14</itunes:episode>
      <podcast:episode>14</podcast:episode>
      <itunes:title>Ecological Restoration with Patrick Leung from Earthshot Labs</itunes:title>
      <itunes:episodeType>full</itunes:episodeType>
      <guid isPermaLink="false">921b46b5-e4c8-49fb-9c77-9c7dd4e72f52</guid>
      <link>https://pixelscientia.com/podcast/ecological-restoration-with-patrick-leung-from-earthshot-labs/</link>
      <description>
        <![CDATA[<p>In this episode, I talk with Patrick Leung, Co-founder and CTO of Earthshot Labs, about using machine learning to help predict and restore forests and our ecosystem. Earthshot Labs is building the technology and expert guidance to develop and finance nature-based carbon projects globally. Patrick and I talked about how Earthshot Labs gathers and annotates data, the challenges in working with remote sensing and other forms of data, the importance of collaboration across disciplines, and how machine learning tools can help save our ecosystems.</p><p><strong>QUOTES:<br></strong><em>"We are able to actually bridge that financing gap and unlock a whole bunch of new projects that can then be in the carbon marketplace, and also bring a host of benefits to both the ecosystem, as well as, the communities that live around the ecosystem."</em></p><p>"Machine learning is really essential because what we're trying to do here is predict the future. We're trying to predict the next 30 years of a forest regrowing in a tropical region."</p><p>"We must look at the past. We must look at whatever data we can gather from the past state of the ecosystem and use various machine learning methods to predict the future in order to provide a view on what's gonna happen on this land in the future when we do this project."</p><p>"These are actual mathematical simulations that take into account the current conditions of the ecosystem and actually forecast them by using a kind of simulation that incorporates photosynthesis and evapotranspiration and other forms of ecological processes."</p><p>"They would look at historical flood maps and essentially combine them with flood forecasting models in order to generate what is a given area going to look like if it gets flooded in the future because of climate change or for other reasons. And I was very enamored with that. 
I thought that was a very, very clever use of a technology."</p><p>"I think what we're doing definitely encompasses biodiverse native ecosystems and just restoring as many of them as we can throughout the most critical parts of the biosphere, that there are in this world. And also helping to switch our societal systems into more of a harmonious, and regenerative relationship with those ecosystems."<strong></strong></p><p>LINKS:<br><a href="https://www.earthshot.eco/">Earthshot Labs</a><br><a href="https://www.linkedin.com/in/puiwah">Patrick Leung</a></p><p><strong>Resources for Computer Vision Teams:</strong></p><p><a href="https://www.linkedin.com/in/hdcouture/">LinkedIn</a> – Connect with Heather.</p><p><a href="https://pixelscientia.com/newsletter/">Computer Vision Insights Newsletter</a> – A biweekly newsletter to help bring the latest machine learning and computer vision research to applications in people and planetary health.</p><p><a href="http://pixelscientia.com/services/strategy-session/">Computer Vision Strategy Session</a> – Not sure how to advance your computer vision project? Get unstuck with a clear set of next steps. Schedule a 1 hour strategy session now to advance your project.</p><p><a href="https://pixelscientia.com/services/advisory/">Computer Vision Advisory Services</a> – Monthly advisory services to help you strategically plan your CV/ML capabilities, reduce the trial-and-error of model development, and get to market faster.</p>]]>
      </description>
      <content:encoded>
        <![CDATA[<p>In this episode, I talk with Patrick Leung, Co-founder and CTO of Earthshot Labs, about using machine learning to help predict and restore forests and our ecosystem. Earthshot Labs is building the technology and expert guidance to develop and finance nature-based carbon projects globally. Patrick and I talked about how Earthshot Labs gathers and annotates data, the challenges in working with remote sensing and other forms of data, the importance of collaboration across disciplines, and how machine learning tools can help save our ecosystems.</p><p><strong>QUOTES:<br></strong><em>"We are able to actually bridge that financing gap and unlock a whole bunch of new projects that can then be in the carbon marketplace, and also bring a host of benefits to both the ecosystem, as well as, the communities that live around the ecosystem."</em></p><p>"Machine learning is really essential because what we're trying to do here is predict the future. We're trying to predict the next 30 years of a forest regrowing in a tropical region."</p><p>"We must look at the past. We must look at whatever data we can gather from the past state of the ecosystem and use various machine learning methods to predict the future in order to provide a view on what's gonna happen on this land in the future when we do this project."</p><p>"These are actual mathematical simulations that take into account the current conditions of the ecosystem and actually forecast them by using a kind of simulation that incorporates photosynthesis and evapotranspiration and other forms of ecological processes."</p><p>"They would look at historical flood maps and essentially combine them with flood forecasting models in order to generate what is a given area going to look like if it gets flooded in the future because of climate change or for other reasons. And I was very enamored with that. 
I thought that was a very, very clever use of a technology."</p><p>"I think what we're doing definitely encompasses biodiverse native ecosystems and just restoring as many of them as we can throughout the most critical parts of the biosphere, that there are in this world. And also helping to switch our societal systems into more of a harmonious, and regenerative relationship with those ecosystems."<strong></strong></p><p>LINKS:<br><a href="https://www.earthshot.eco/">Earthshot Labs</a><br><a href="https://www.linkedin.com/in/puiwah">Patrick Leung</a></p><p><strong>Resources for Computer Vision Teams:</strong></p><p><a href="https://www.linkedin.com/in/hdcouture/">LinkedIn</a> – Connect with Heather.</p><p><a href="https://pixelscientia.com/newsletter/">Computer Vision Insights Newsletter</a> – A biweekly newsletter to help bring the latest machine learning and computer vision research to applications in people and planetary health.</p><p><a href="http://pixelscientia.com/services/strategy-session/">Computer Vision Strategy Session</a> – Not sure how to advance your computer vision project? Get unstuck with a clear set of next steps. Schedule a 1 hour strategy session now to advance your project.</p><p><a href="https://pixelscientia.com/services/advisory/">Computer Vision Advisory Services</a> – Monthly advisory services to help you strategically plan your CV/ML capabilities, reduce the trial-and-error of model development, and get to market faster.</p>]]>
      </content:encoded>
      <pubDate>Mon, 09 Jan 2023 06:00:00 -0500</pubDate>
      <author>Heather D. Couture</author>
      <enclosure url="https://2.gum.fm/op3.dev/e/pdcn.co/e/pdst.fm/e/dts.podtrac.com/redirect.mp3/media.transistor.fm/ec1b5d6b/0ed96f5b.mp3" length="27103324" type="audio/mpeg"/>
      <itunes:author>Heather D. Couture</itunes:author>
      <itunes:image href="https://img.transistor.fm/Mw_AyjEmXxegQD1AMS7V3LH0ATri_d-sjmU1khudqZA/rs:fill:0:0:1/w:1400/h:1400/q:60/mb:500000/aHR0cHM6Ly9pbWct/dXBsb2FkLXByb2R1/Y3Rpb24udHJhbnNp/c3Rvci5mbS9lcGlz/b2RlLzExNjAyMjkv/MTY3MzIyMjM1MC1h/cnR3b3JrLmpwZw.jpg"/>
      <itunes:duration>1688</itunes:duration>
      <itunes:summary>In this episode, I talk with Patrick Leung, Co-founder and CTO of Earthshot Labs, about using machine learning to help predict and restore forests and our ecosystem. Earthshot Labs is building the technology and expert guidance to develop and finance nature-based carbon projects globally. Patrick and I talked about how Earthshot Labs gathers and annotates data, the challenges in working with remote sensing and other forms of data, the importance of collaboration across disciplines, and how machine learning tools can help save our ecosystems.</itunes:summary>
      <itunes:subtitle>In this episode, I talk with Patrick Leung, Co-founder and CTO of Earthshot Labs, about using machine learning to help predict and restore forests and our ecosystem. Earthshot Labs is building the technology and expert guidance to develop and finance natu</itunes:subtitle>
      <itunes:keywords>AI, startups, business, machine learning, computer vision, deep learning, impact, sustainability, healthcare, medical</itunes:keywords>
      <itunes:explicit>No</itunes:explicit>
      <podcast:transcript url="https://share.transistor.fm/s/ec1b5d6b/transcript.txt" type="text/plain"/>
    </item>
    <item>
      <title>Personalized Physiology Analytics with Matt Pipke from physIQ</title>
      <itunes:episode>13</itunes:episode>
      <podcast:episode>13</podcast:episode>
      <itunes:title>Personalized Physiology Analytics with Matt Pipke from physIQ</itunes:title>
      <itunes:episodeType>full</itunes:episodeType>
      <guid isPermaLink="false">b7375e2c-955a-4188-aed2-e8a27a7b160d</guid>
      <link>https://pixelscientia.com/podcast/personalized-physiology-analytics-with-matt-pipke-from-physiq/</link>
      <description>
        <![CDATA[<p>In this episode, I talk with Matt Pipke, Co-founder and Chief Digital Health Officer of physIQ, about personalized medical predictions from physiology data. Matt and I talked about the challenges in working with physiology data, how to validate models and minimize bias, and the importance of collaboration.</p><p><br></p><p><strong>Quotes:</strong></p><p>“What physIQ does is it harvests data from those continuous data streams from wearable sensors and produces analytical results that are useful for clinical care when taking care of patients who are outside the four walls of the hospital and in scientific endeavors such as clinical trials where it's interesting to know what the efficacy of the drug is on a target disease, whether the health of the patients who might take those drugs is being improved or at least is not degrading any further.”</p><p><br></p><p>“What we have to do is build our algorithms and our analytics based on machine learning techniques and, of course, the more recent really successful subgroup of deep neural net algorithms that can sift through this data and can highlight accurately the vital signs of physiology we need to make the assessments available.”</p><p><br></p><p>“So part of the issue there, is to figure out how to differentiate the background variation that's normal for people as they move around in their daily lives from the telltale signs that they may be suffering from a derangement of physiology.”</p><p><br></p><p>“There's a lot of companies and offerings out there that are in the consumer fitness market. 
They might be appropriate for healthy populations that are looking to track their activity, the amount of sleep that a healthy person might get, but they're really not the right target populations of interest for the medical system or for clinical trials where you have a population that's suffering from a disease that a drug is targeting.”</p><p><br></p><p>“Now I know that a lot of companies out there tend to avoid the regulatory pathway for medical or health or fitness applications, and I don't think that's a good move. . . The FDA experience for us has been at times frustrating of course, as it is for anybody who has to deal with regulations, but at the same time, there is a core of meaningful value add there. Regulatory agencies around the world, FDA included, they have a pretty thankless job. They never get credit for what they do. They only get complained about. But what they're doing is really, really critical to outputting valuable, usable product in the healthcare and medical space.”</p><p><br></p><p>“So bias in models really comes back to the representativeness of your data, right? So if you've got data that's not representing the target users, the target populations that you're going to analyze, you can end up with bias. You can end up with bias in surprising ways.”</p><p><br></p><p>“If you aren't aware of what might be lurking in your data, you could be overfitting the wrong thing and then find out that your algorithm does not generalize, does not work in other areas.”</p><p><br></p><p>“My feeling about this is that it's all about the data. physIQ got started a lot earlier than we probably should have and we've benefited in a strange way in that we've been in the game a lot longer than other players in this space. 
So we've been collecting data for a long time and we built a robust platform to collect data.”</p><p><br></p><p>“There's a lot of resistance to change and, in fact, the layperson might be horrified to learn how the healthcare system actually works. But, stepping back, something definitely has to change in healthcare. We all know that it's not sustainable the way things are now. But we don't have any illusions at physIQ about how a little company like ours can change things by ourselves. It's really about timing. Right. And sometimes you have to look for those windows of opportunity when in large industries with huge amounts of existing business relationships and the way that they work today are ready for change.”</p><p><br></p><p><strong>Links:</strong></p><p><a href="https://www.physiq.com/">physIQ</a></p><p><strong>Resources for Computer Vision Teams:</strong></p><p><a href="https://www.linkedin.com/in/hdcouture/">LinkedIn</a> – Connect with Heather.</p><p><a href="https://pixelscientia.com/newsletter/">Computer Vision Insights Newsletter</a> – A biweekly newsletter to help bring the latest machine learning and computer vision research to applications in people and planetary health.</p><p><a href="http://pixelscientia.com/services/strategy-session/">Computer Vision Strategy Session</a> – Not sure how to advance your computer vision project? Get unstuck with a clear set of next steps. Schedule a 1 hour strategy session now to advance your project.</p><p><a href="https://pixelscientia.com/services/advisory/">Computer Vision Advisory Services</a> – Monthly advisory services to help you strategically plan your CV/ML capabilities, reduce the trial-and-error of model development, and get to market faster.</p>]]>
      </description>
      <content:encoded>
        <![CDATA[<p>In this episode, I talk with Matt Pipke, Co-founder and Chief Digital Health Officer of physIQ, about personalized medical predictions from physiology data. Matt and I talked about the challenges in working with physiology data, how to validate models and minimize bias, and the importance of collaboration.</p><p><br></p><p><strong>Quotes:</strong></p><p>“What physIQ does is it harvests data from those continuous data streams from wearable sensors and produces analytical results that are useful for clinical care when taking care of patients who are outside the four walls of the hospital and in scientific endeavors such as clinical trials where it's interesting to know what the efficacy of the drug is on a target disease, whether the health of the patients who might take those drugs is being improved or at least is not degrading any further.”</p><p><br></p><p>“What we have to do is build our algorithms and our analytics based on machine learning techniques and, of course, the more recent really successful subgroup of deep neural net algorithms that can sift through this data and can highlight accurately the vital signs of physiology we need to make the assessments available.”</p><p><br></p><p>“So part of the issue there, is to figure out how to differentiate the background variation that's normal for people as they move around in their daily lives from the telltale signs that they may be suffering from a derangement of physiology.”</p><p><br></p><p>“There's a lot of companies and offerings out there that are in the consumer fitness market. 
They might be appropriate for healthy populations that are looking to track their activity, the amount of sleep that a healthy person might get, but they're really not the right target populations of interest for the medical system or for clinical trials where you have a population that's suffering from a disease that a drug is targeting.”</p><p><br></p><p>“Now I know that a lot of companies out there tend to avoid the regulatory pathway for medical or health or fitness applications, and I don't think that's a good move. . . The FDA experience for us has been at times frustrating of course, as it is for anybody who has to deal with regulations, but at the same time, there is a core of meaningful value add there. Regulatory agencies around the world, FDA included, they have a pretty thankless job. They never get credit for what they do. They only get complained about. But what they're doing is really, really critical to outputting valuable, usable product in the healthcare and medical space.”</p><p><br></p><p>“So bias in models really comes back to the representativeness of your data, right? So if you've got data that's not representing the target users, the target populations that you're going to analyze, you can end up with bias. You can end up with bias in surprising ways.”</p><p><br></p><p>“If you aren't aware of what might be lurking in your data, you could be overfitting the wrong thing and then find out that your algorithm does not generalize, does not work in other areas.”</p><p><br></p><p>“My feeling about this is that it's all about the data. physIQ got started a lot earlier than we probably should have and we've benefited in a strange way in that we've been in the game a lot longer than other players in this space. 
So we've been collecting data for a long time and we built a robust platform to collect data.”</p><p><br></p><p>“There's a lot of resistance to change and, in fact, the layperson might be horrified to learn how the healthcare system actually works. But, stepping back, something definitely has to change in healthcare. We all know that it's not sustainable the way things are now. But we don't have any illusions at physIQ about how a little company like ours can change things by ourselves. It's really about timing. Right. And sometimes you have to look for those windows of opportunity when in large industries with huge amounts of existing business relationships and the way that they work today are ready for change.”</p><p><br></p><p><strong>Links:</strong></p><p><a href="https://www.physiq.com/">physIQ</a></p><p><strong>Resources for Computer Vision Teams:</strong></p><p><a href="https://www.linkedin.com/in/hdcouture/">LinkedIn</a> – Connect with Heather.</p><p><a href="https://pixelscientia.com/newsletter/">Computer Vision Insights Newsletter</a> – A biweekly newsletter to help bring the latest machine learning and computer vision research to applications in people and planetary health.</p><p><a href="http://pixelscientia.com/services/strategy-session/">Computer Vision Strategy Session</a> – Not sure how to advance your computer vision project? Get unstuck with a clear set of next steps. Schedule a 1 hour strategy session now to advance your project.</p><p><a href="https://pixelscientia.com/services/advisory/">Computer Vision Advisory Services</a> – Monthly advisory services to help you strategically plan your CV/ML capabilities, reduce the trial-and-error of model development, and get to market faster.</p>]]>
      </content:encoded>
      <pubDate>Mon, 02 Jan 2023 06:00:00 -0500</pubDate>
      <author>Heather D. Couture</author>
      <enclosure url="https://2.gum.fm/op3.dev/e/pdcn.co/e/pdst.fm/e/dts.podtrac.com/redirect.mp3/media.transistor.fm/9e2cbfaa/9f20f9cd.mp3" length="32197473" type="audio/mpeg"/>
      <itunes:author>Heather D. Couture</itunes:author>
      <itunes:image href="https://img.transistor.fm/UTyQHTdFqeyxIdnRFbEBlanE5uGbtlZb8EiyCKzVHDc/rs:fill:0:0:1/w:1400/h:1400/q:60/mb:500000/aHR0cHM6Ly9pbWct/dXBsb2FkLXByb2R1/Y3Rpb24udHJhbnNp/c3Rvci5mbS9lcGlz/b2RlLzExNDkyODMv/MTY3MjM2MDc4My1h/cnR3b3JrLmpwZw.jpg"/>
      <itunes:duration>2010</itunes:duration>
      <itunes:summary>In this episode, I talk with Matt Pipke, Co-founder and Chief Digital Health Officer of physIQ, about personalized medical predictions from physiology data. Matt and I talked about the challenges in working with physiology data, how to validate models and minimize bias, and the importance of collaboration.</itunes:summary>
      <itunes:subtitle>In this episode, I talk with Matt Pipke, Co-founder and Chief Digital Health Officer of physIQ, about personalized medical predictions from physiology data. Matt and I talked about the challenges in working with physiology data, how to validate models and</itunes:subtitle>
      <itunes:keywords>AI, startups, business, machine learning, computer vision, deep learning, impact, sustainability, healthcare, medical</itunes:keywords>
      <itunes:explicit>No</itunes:explicit>
      <podcast:transcript url="https://share.transistor.fm/s/9e2cbfaa/transcript.vtt" type="text/vtt" rel="captions"/>
    </item>
    <item>
      <title>Early Cancer Detection with Emi Gal from Ezra</title>
      <itunes:episode>12</itunes:episode>
      <podcast:episode>12</podcast:episode>
      <itunes:title>Early Cancer Detection with Emi Gal from Ezra</itunes:title>
      <itunes:episodeType>full</itunes:episodeType>
      <guid isPermaLink="false">50cc89ce-4294-4dca-8ffb-9848d06035dc</guid>
      <link>https://pixelscientia.com/podcast/early-cancer-detection-with-emi-gal-from-ezra/</link>
      <description>
        <![CDATA[<p>In this episode, I talk with Emi Gal, co-founder and CEO of Ezra, about cancer screening with a full body MRI scan. Ezra is on a mission to detect cancer early for everyone by making the process more accurate, faster, and cheaper. Emi and I talked about the challenges in working with MR data, how regulatory processes affect model development, and the importance of validation.</p><p><br></p><p><strong>Quotes:</strong></p><p>“What we've been able to achieve is to essentially reduce the cost and the time in a scanner of an MRI from about two to three hours for full body to 60 minutes. And we're actually working on a new AI that will roll out next year that will reduce the scan time to 30 minutes.”</p><p><br></p><p>“What we do is we acquire the scan fewer times, and then we've built machine learning models that recognize what noise looks like and then just remove that noise. And then we kind of expanded that from not just noise. If you acquire scans with lower resolution, the resulting images are a little bit blurry so we can sharpen them.”</p><p><br></p><p>“Our focus on the scanning front is to reduce scan time, which yields these images with increased noise artifacts, and then use machine learning to enhance these images so that a radiologist can then use them for interpretation.”</p><p><br></p><p>“I think what having to receive FDA clearance for AI does, is it really forces the company from day one to think about what are all of the things that might influence the performance of said AI, and what can we do to ensure that we maximize the chances of success?”</p><p><br></p><p>“We have had an instance when we had to go back to the drawing board and build the model again because we failed internal validation prior to formal validation that we had to submit to the FDA.”</p><p><br></p><p>“I think the way you ensure that the technology we develop fits the clinical workflow is actually not starting with the technology, but starting with the end goal in 
mind and then figuring out what you need to do in order to achieve that.”</p><p><br></p><p>“To screen a hundred million people a year, we think, is a huge endeavor and probably going to take a decade or two to achieve. And I'm personally committed to Ezra for the rest of my career.  In the next three to five years, I would hope we are making good progress towards that mission, and maybe in five years we're screening at least a million people a year.”</p><p><br></p><p><strong>Links</strong>:</p><p><a href="https://ezra.com/">Ezra</a></p><p><a href="https://www.emigal.com/">Emi Gal</a></p><p><strong>Resources for Computer Vision Teams:</strong></p><p><a href="https://www.linkedin.com/in/hdcouture/">LinkedIn</a> – Connect with Heather.</p><p><a href="https://pixelscientia.com/newsletter/">Computer Vision Insights Newsletter</a> – A biweekly newsletter to help bring the latest machine learning and computer vision research to applications in people and planetary health.</p><p><a href="http://pixelscientia.com/services/strategy-session/">Computer Vision Strategy Session</a> – Not sure how to advance your computer vision project? Get unstuck with a clear set of next steps. Schedule a 1 hour strategy session now to advance your project.</p><p><a href="https://pixelscientia.com/services/advisory/">Computer Vision Advisory Services</a> – Monthly advisory services to help you strategically plan your CV/ML capabilities, reduce the trial-and-error of model development, and get to market faster.</p>]]>
      </description>
      <content:encoded>
        <![CDATA[<p>In this episode, I talk with Emi Gal, co-founder and CEO of Ezra, about cancer screening with a full body MRI scan. Ezra is on a mission to detect cancer early for everyone by making the process more accurate, faster, and cheaper. Emi and I talked about the challenges in working with MR data, how regulatory processes affect model development, and the importance of validation.</p><p><br></p><p><strong>Quotes:</strong></p><p>“What we've been able to achieve is to essentially reduce the cost and the time in a scanner of an MRI from about two to three hours for full body to 60 minutes. And we're actually working on a new AI that will roll out next year that will reduce the scan time to 30 minutes.”</p><p><br></p><p>“What we do is we acquire the scan fewer times, and then we've built machine learning models that recognize what noise looks like and then just remove that noise. And then we kind of expanded that from not just noise. If you acquire scans with lower resolution, the resulting images are a little bit blurry so we can sharpen them.”</p><p><br></p><p>“Our focus on the scanning front is to reduce scan time, which yields these images with increased noise artifacts, and then use machine learning to enhance these images so that a radiologist can then use them for interpretation.”</p><p><br></p><p>“I think what having to receive FDA clearance for AI does, is it really forces the company from day one to think about what are all of the things that might influence the performance of said AI, and what can we do to ensure that we maximize the chances of success?”</p><p><br></p><p>“We have had an instance when we had to go back to the drawing board and build the model again because we failed internal validation prior to formal validation that we had to submit to the FDA.”</p><p><br></p><p>“I think the way you ensure that the technology we develop fits the clinical workflow is actually not starting with the technology, but starting with the end goal in 
mind and then figuring out what you need to do in order to achieve that.”</p><p><br></p><p>“To screen a hundred million people a year, we think, is a huge endeavor and probably going to take a decade or two to achieve. And I'm personally committed to Ezra for the rest of my career.  In the next three to five years, I would hope we are making good progress towards that mission, and maybe in five years we're screening at least a million people a year.”</p><p><br></p><p><strong>Links</strong>:</p><p><a href="https://ezra.com/">Ezra</a></p><p><a href="https://www.emigal.com/">Emi Gal</a></p><p><strong>Resources for Computer Vision Teams:</strong></p><p><a href="https://www.linkedin.com/in/hdcouture/">LinkedIn</a> – Connect with Heather.</p><p><a href="https://pixelscientia.com/newsletter/">Computer Vision Insights Newsletter</a> – A biweekly newsletter to help bring the latest machine learning and computer vision research to applications in people and planetary health.</p><p><a href="http://pixelscientia.com/services/strategy-session/">Computer Vision Strategy Session</a> – Not sure how to advance your computer vision project? Get unstuck with a clear set of next steps. Schedule a 1 hour strategy session now to advance your project.</p><p><a href="https://pixelscientia.com/services/advisory/">Computer Vision Advisory Services</a> – Monthly advisory services to help you strategically plan your CV/ML capabilities, reduce the trial-and-error of model development, and get to market faster.</p>]]>
      </content:encoded>
      <pubDate>Mon, 19 Dec 2022 06:00:00 -0500</pubDate>
      <author>Heather D. Couture</author>
      <enclosure url="https://2.gum.fm/op3.dev/e/pdcn.co/e/pdst.fm/e/dts.podtrac.com/redirect.mp3/media.transistor.fm/e975d37d/23ffc220.mp3" length="23032862" type="audio/mpeg"/>
      <itunes:author>Heather D. Couture</itunes:author>
      <itunes:image href="https://img.transistor.fm/h1loMyE97uLqzHjw8JQGEIKtSbOlwIhOSb5Zrfr1FE0/rs:fill:0:0:1/w:1400/h:1400/q:60/mb:500000/aHR0cHM6Ly9pbWct/dXBsb2FkLXByb2R1/Y3Rpb24udHJhbnNp/c3Rvci5mbS9lcGlz/b2RlLzExMzczODIv/MTY3MTEzOTA3OS1h/cnR3b3JrLmpwZw.jpg"/>
      <itunes:duration>1437</itunes:duration>
      <itunes:summary>In this episode, I talk with Emi Gal, co-founder and CEO of Ezra, about cancer screening with a full body MRI scan. Ezra is on a mission to detect cancer early for everyone by making the process more accurate, faster, and cheaper. Emi and I talked about the challenges in working with MR data, how regulatory processes affect model development, and the importance of validation.</itunes:summary>
      <itunes:subtitle>In this episode, I talk with Emi Gal, co-founder and CEO of Ezra, about cancer screening with a full body MRI scan. Ezra is on a mission to detect cancer early for everyone by making the process more accurate, faster, and cheaper. Emi and I talked about t</itunes:subtitle>
      <itunes:keywords>AI, startups, business, machine learning, computer vision, deep learning, impact, sustainability, healthcare, medical</itunes:keywords>
      <itunes:explicit>No</itunes:explicit>
      <podcast:transcript url="https://share.transistor.fm/s/e975d37d/transcript.vtt" type="text/vtt" rel="captions"/>
    </item>
    <item>
      <title>Sorting Recyclables with Amanda Marrs from AMP Robotics</title>
      <itunes:episode>11</itunes:episode>
      <podcast:episode>11</podcast:episode>
      <itunes:title>Sorting Recyclables with Amanda Marrs from AMP Robotics</itunes:title>
      <itunes:episodeType>full</itunes:episodeType>
      <guid isPermaLink="false">22d63ab6-4f0c-415c-bd41-a1d6c680ea36</guid>
      <link>https://pixelscientia.com/podcast/sorting-recyclable-materials-with-amanda-marrs-from-amp-robotics/</link>
      <description>
        <![CDATA[<p>In this episode, I talk with Amanda Marrs, senior director of product at AMP Robotics about modernizing the world’s recycling infrastructure. Amanda and I talked about how they ensure their models work for a diverse set of objects, measuring the success of their technology, and some tips for building a successful ML team.</p><p><br></p><p><strong>Quotes:</strong></p><p>“At AMP we have a broad mission of enabling a world without waste.”</p><p><br></p><p>“We work backwards on everything that ends up in a landfill to develop the technology we need to keep that from happening.”</p><p><br></p><p>“We really have two main areas that we work in. One is technology that we will put in place at a material recovery facility. . . The other half of what we do at AMP is use our own technology for what's called a secondary sortation facility.”</p><p><br></p><p>“All of this technology really has three main components. You have to be able to see the objects on the belt, and that's where the machine learning comes in. You have to be able to sort the objects effectively, and there's some ML behind that as well. And then you have to be able to report, see what's happening, and draw conclusions and make decisions and optimize further in the facilities.”</p><p><br></p><p>“A majority of the data fits nicely within these primary categories. But, in AI, typically there's this natural long tail, and we have that as well.”</p><p><br></p><p>“Diversity is the name of the game in this industry where you have to be able to recognize everything. And so a huge sample set of data really helps us overcome that.”</p><p><br></p><p>“The wonderful thing about AI, it doesn't get tired, it doesn't get dizzy. 
And it can keep its inference at the same rate.”</p><p><br></p><p>“What we try to do when we translate this to customers, to non deeply technical folks – they're technical in other ways, but they're not dealing with AI all day – is we really try to translate it to the outcomes.”</p><p><br></p><p>“Start your hiring process early so that you're expecting it might take a while before you really, really need that team member joined, onboarded, trained up and enabled to help deliver on projects.”</p><p><br></p><p>“I think, for us, recruiting and thinking about what mix of talent we really need on a team, it's looking across all of those different areas and building out a team that really complements each other's skillsets.”</p><p><br></p><p><strong>Links:</strong></p><p><a href="https://www.amprobotics.com/">AMP Robotics</a></p><p><strong>Resources for Computer Vision Teams:</strong></p><p><a href="https://www.linkedin.com/in/hdcouture/">LinkedIn</a> – Connect with Heather.</p><p><a href="https://pixelscientia.com/newsletter/">Computer Vision Insights Newsletter</a> – A biweekly newsletter to help bring the latest machine learning and computer vision research to applications in people and planetary health.</p><p><a href="http://pixelscientia.com/services/strategy-session/">Computer Vision Strategy Session</a> – Not sure how to advance your computer vision project? Get unstuck with a clear set of next steps. Schedule a 1 hour strategy session now to advance your project.</p><p><a href="https://pixelscientia.com/services/advisory/">Computer Vision Advisory Services</a> – Monthly advisory services to help you strategically plan your CV/ML capabilities, reduce the trial-and-error of model development, and get to market faster.</p>]]>
      </description>
      <content:encoded>
        <![CDATA[<p>In this episode, I talk with Amanda Marrs, senior director of product at AMP Robotics about modernizing the world’s recycling infrastructure. Amanda and I talked about how they ensure their models work for a diverse set of objects, measuring the success of their technology, and some tips for building a successful ML team.</p><p><br></p><p><strong>Quotes:</strong></p><p>“At AMP we have a broad mission of enabling a world without waste.”</p><p><br></p><p>“We work backwards on everything that ends up in a landfill to develop the technology we need to keep that from happening.”</p><p><br></p><p>“We really have two main areas that we work in. One is technology that we will put in place at a material recovery facility. . . The other half of what we do at AMP is use our own technology for what's called a secondary sortation facility.”</p><p><br></p><p>“All of this technology really has three main components. You have to be able to see the objects on the belt, and that's where the machine learning comes in. You have to be able to sort the objects effectively, and there's some ML behind that as well. And then you have to be able to report, see what's happening, and draw conclusions and make decisions and optimize further in the facilities.”</p><p><br></p><p>“A majority of the data fits nicely within these primary categories. But, in AI, typically there's this natural long tail, and we have that as well.”</p><p><br></p><p>“Diversity is the name of the game in this industry where you have to be able to recognize everything. And so a huge sample set of data really helps us overcome that.”</p><p><br></p><p>“The wonderful thing about AI, it doesn't get tired, it doesn't get dizzy. 
And it can keep its inference at the same rate.”</p><p><br></p><p>“What we try to do when we translate this to customers, to non deeply technical folks – they're technical in other ways, but they're not dealing with AI all day – is we really try to translate it to the outcomes.”</p><p><br></p><p>“Start your hiring process early so that you're expecting it might take a while before you really, really need that team member joined, onboarded, trained up and enabled to help deliver on projects.”</p><p><br></p><p>“I think, for us, recruiting and thinking about what mix of talent we really need on a team, it's looking across all of those different areas and building out a team that really complements each other's skillsets.”</p><p><br></p><p><strong>Links:</strong></p><p><a href="https://www.amprobotics.com/">AMP Robotics</a></p><p><strong>Resources for Computer Vision Teams:</strong></p><p><a href="https://www.linkedin.com/in/hdcouture/">LinkedIn</a> – Connect with Heather.</p><p><a href="https://pixelscientia.com/newsletter/">Computer Vision Insights Newsletter</a> – A biweekly newsletter to help bring the latest machine learning and computer vision research to applications in people and planetary health.</p><p><a href="http://pixelscientia.com/services/strategy-session/">Computer Vision Strategy Session</a> – Not sure how to advance your computer vision project? Get unstuck with a clear set of next steps. Schedule a 1 hour strategy session now to advance your project.</p><p><a href="https://pixelscientia.com/services/advisory/">Computer Vision Advisory Services</a> – Monthly advisory services to help you strategically plan your CV/ML capabilities, reduce the trial-and-error of model development, and get to market faster.</p>]]>
      </content:encoded>
      <pubDate>Mon, 12 Dec 2022 06:00:00 -0500</pubDate>
      <author>Heather D. Couture</author>
      <enclosure url="https://2.gum.fm/op3.dev/e/pdcn.co/e/pdst.fm/e/dts.podtrac.com/redirect.mp3/media.transistor.fm/a578041c/88aa2d1b.mp3" length="20431520" type="audio/mpeg"/>
      <itunes:author>Heather D. Couture</itunes:author>
      <itunes:image href="https://img.transistor.fm/uBPoPn9QsEhqL6xDX3uc6MfiyOIviFWjvthDOTCcSl8/rs:fill:0:0:1/w:1400/h:1400/q:60/mb:500000/aHR0cHM6Ly9pbWct/dXBsb2FkLXByb2R1/Y3Rpb24udHJhbnNp/c3Rvci5mbS9lcGlz/b2RlLzExMzEwNTkv/MTY3MDc5NDA3OS1h/cnR3b3JrLmpwZw.jpg"/>
      <itunes:duration>1275</itunes:duration>
      <itunes:summary>In this episode, I talk with Amanda Marrs, senior director of product at AMP Robotics about modernizing the world’s recycling infrastructure. Amanda and I talked about how they ensure their models work for a diverse set of objects, how they measure the success of their technology, and some tips for building a successful ML team.</itunes:summary>
      <itunes:subtitle>In this episode, I talk with Amanda Marrs, senior director of product at AMP Robotics about modernizing the world’s recycling infrastructure. Amanda and I talked about how they ensure their models work for a diverse set of objects, how they measure the su</itunes:subtitle>
      <itunes:keywords>AI, startups, business, machine learning, computer vision, deep learning, impact, sustainability, healthcare, medical</itunes:keywords>
      <itunes:explicit>No</itunes:explicit>
      <podcast:transcript url="https://share.transistor.fm/s/a578041c/transcript.vtt" type="text/vtt" rel="captions"/>
    </item>
    <item>
      <title>Cell Sorting with Mahyar Salek from Deepcell</title>
      <itunes:episode>10</itunes:episode>
      <podcast:episode>10</podcast:episode>
      <itunes:title>Cell Sorting with Mahyar Salek from Deepcell</itunes:title>
      <itunes:episodeType>full</itunes:episodeType>
      <guid isPermaLink="false">13c0bdfe-e0b4-4f7a-a8c2-ae5e2c534458</guid>
      <link>https://pixelscientia.com/podcast/cell-sorting-with-mahyar-salek-from-deepcell/</link>
      <description>
        <![CDATA[<p>In this episode, I talk with Mahyar Salek, co-founder and CTO of Deepcell, about an AI-powered technology for single cell analysis through the lens of high content cell morphology.  Deepcell's platform blends deep learning, microfluidics, and high resolution optics to deliver novel insights about cell biology and has the capability to sort, label-free for downstream multi-omic and functional analysis for use in research, translational studies, and therapeutic research.  We discussed some of the challenges and opportunities in working with single cell images and how they used self-supervised learning.</p><p><br></p><p><strong>Quotes:</strong></p><p><br></p><p>“We really use the power of computer vision and AI capabilities combined with the advances in microfluidics and imaging to create this high dimensional, high content interpretation of single cell images. And we use that in real time to purify and separate cells of interest.”</p><p><br></p><p>“We have to see millions of cells even in just one go, one run. So you can't really do that without the scalability of an algorithm, right? And then we have to be consistent and robust.”</p><p><br></p><p>“When I hear challenges, I equate them with opportunities and I'll tell you why. So, for instance, one of the challenges, not just with us, but any sort of AI solution that looks at biological samples is the susceptibility to artifacts.”</p><p><br></p><p>“But as soon as you roll it out, there's a difference between your lab and the lab, you know, a block down the road because of the artifacts. 
So it's artifacts are definitely challenging, but for us, it's an opportunity as I mentioned, because we generate the data through our own platform and that means that we have a very controlled environment.”</p><p><br></p><p>“Because, again, we have the full control over the imaging path and where the cells lie, where we image them, we could actually do these sort of things and come up with models that are very less reliant on labels.”</p><p><br></p><p>“By being able to run a biological assay and validate whether the existing model, like basically errors in the existing models and existing labels, and that way you're able to iterate very quickly on your learning without even relying on arguably erroneous human labels, erroneous and obviously expensive human labels.”</p><p><br></p><p>“Any modern life science companies that rely on data, you have to have a very tight collaboration between machine learning and data scientists and the domain experts.”</p><p><br></p><p>“It is really important to, as you kind of come up with a development strategy and the product strategy, understand where you could rely on AI today versus where you hope that the AI could deliver, you know, two years down the road.”</p><p><br></p><p><strong>Links:</strong></p><p><a href="https://deepcell.com/">Deepcell</a></p><p><strong>Resources for Computer Vision Teams:</strong></p><p><a href="https://www.linkedin.com/in/hdcouture/">LinkedIn</a> – Connect with Heather.</p><p><a href="https://pixelscientia.com/newsletter/">Computer Vision Insights Newsletter</a> – A biweekly newsletter to help bring the latest machine learning and computer vision research to applications in people and planetary health.</p><p><a href="http://pixelscientia.com/services/strategy-session/">Computer Vision Strategy Session</a> – Not sure how to advance your computer vision project? Get unstuck with a clear set of next steps. 
Schedule a 1 hour strategy session now to advance your project.</p><p><a href="https://pixelscientia.com/services/advisory/">Computer Vision Advisory Services</a> – Monthly advisory services to help you strategically plan your CV/ML capabilities, reduce the trial-and-error of model development, and get to market faster.</p>]]>
      </description>
      <content:encoded>
        <![CDATA[<p>In this episode, I talk with Mahyar Salek, co-founder and CTO of Deepcell, about an AI-powered technology for single cell analysis through the lens of high content cell morphology.  Deepcell's platform blends deep learning, microfluidics, and high resolution optics to deliver novel insights about cell biology and has the capability to sort, label-free for downstream multi-omic and functional analysis for use in research, translational studies, and therapeutic research.  We discussed some of the challenges and opportunities in working with single cell images and how they used self-supervised learning.</p><p><br></p><p><strong>Quotes:</strong></p><p><br></p><p>“We really use the power of computer vision and AI capabilities combined with the advances in microfluidics and imaging to create this high dimensional, high content interpretation of single cell images. And we use that in real time to purify and separate cells of interest.”</p><p><br></p><p>“We have to see millions of cells even in just one go, one run. So you can't really do that without the scalability of an algorithm, right? And then we have to be consistent and robust.”</p><p><br></p><p>“When I hear challenges, I equate them with opportunities and I'll tell you why. So, for instance, one of the challenges, not just with us, but any sort of AI solution that looks at biological samples is the susceptibility to artifacts.”</p><p><br></p><p>“But as soon as you roll it out, there's a difference between your lab and the lab, you know, a block down the road because of the artifacts. 
So it's artifacts are definitely challenging, but for us, it's an opportunity as I mentioned, because we generate the data through our own platform and that means that we have a very controlled environment.”</p><p><br></p><p>“Because, again, we have the full control over the imaging path and where the cells lie, where we image them, we could actually do these sort of things and come up with models that are very less reliant on labels.”</p><p><br></p><p>“By being able to run a biological assay and validate whether the existing model, like basically errors in the existing models and existing labels, and that way you're able to iterate very quickly on your learning without even relying on arguably erroneous human labels, erroneous and obviously expensive human labels.”</p><p><br></p><p>“Any modern life science companies that rely on data, you have to have a very tight collaboration between machine learning and data scientists and the domain experts.”</p><p><br></p><p>“It is really important to, as you kind of come up with a development strategy and the product strategy, understand where you could rely on AI today versus where you hope that the AI could deliver, you know, two years down the road.”</p><p><br></p><p><strong>Links:</strong></p><p><a href="https://deepcell.com/">Deepcell</a></p><p><strong>Resources for Computer Vision Teams:</strong></p><p><a href="https://www.linkedin.com/in/hdcouture/">LinkedIn</a> – Connect with Heather.</p><p><a href="https://pixelscientia.com/newsletter/">Computer Vision Insights Newsletter</a> – A biweekly newsletter to help bring the latest machine learning and computer vision research to applications in people and planetary health.</p><p><a href="http://pixelscientia.com/services/strategy-session/">Computer Vision Strategy Session</a> – Not sure how to advance your computer vision project? Get unstuck with a clear set of next steps. 
Schedule a 1 hour strategy session now to advance your project.</p><p><a href="https://pixelscientia.com/services/advisory/">Computer Vision Advisory Services</a> – Monthly advisory services to help you strategically plan your CV/ML capabilities, reduce the trial-and-error of model development, and get to market faster.</p>]]>
      </content:encoded>
      <pubDate>Mon, 05 Dec 2022 06:00:00 -0500</pubDate>
      <author>Heather D. Couture</author>
      <enclosure url="https://2.gum.fm/op3.dev/e/pdcn.co/e/pdst.fm/e/dts.podtrac.com/redirect.mp3/media.transistor.fm/2fedc66c/c8d6ec45.mp3" length="30812397" type="audio/mpeg"/>
      <itunes:author>Heather D. Couture</itunes:author>
      <itunes:image href="https://img.transistor.fm/LYPlvkyowQokVUzw0ScZ3uhZuVbI7GcN_GlL93tUYQw/rs:fill:0:0:1/w:1400/h:1400/q:60/mb:500000/aHR0cHM6Ly9pbWct/dXBsb2FkLXByb2R1/Y3Rpb24udHJhbnNp/c3Rvci5mbS9lcGlz/b2RlLzExMTc4MTQv/MTY3MDAxNzQxNS1h/cnR3b3JrLmpwZw.jpg"/>
      <itunes:duration>1924</itunes:duration>
      <itunes:summary>I talk with Mahyar Salek, co-founder and CTO of Deepcell, about an AI-powered technology for single cell analysis through the lens of high content cell morphology.  Deepcell's platform blends deep learning, microfluidics, and high resolution optics to deliver novel insights about cell biology and has the capability to sort, label-free for downstream multi-omic and functional analysis for use in research, translational studies, and therapeutic research.  We discussed some of the challenges and opportunities in working with single cell images and how they used self-supervised learning.</itunes:summary>
      <itunes:subtitle>I talk with Mahyar Salek, co-founder and CTO of Deepcell, about an AI-powered technology for single cell analysis through the lens of high content cell morphology.  Deepcell's platform blends deep learning, microfluidics, and high resolution optics to de</itunes:subtitle>
      <itunes:keywords>AI, startups, business, machine learning, computer vision, deep learning, impact, sustainability, healthcare, medical</itunes:keywords>
      <itunes:explicit>No</itunes:explicit>
    </item>
    <item>
      <title>Data-driven Pathology with Coleman Stavish and Julianna Ianni from Proscia</title>
      <itunes:episode>9</itunes:episode>
      <podcast:episode>9</podcast:episode>
      <itunes:title>Data-driven Pathology with Coleman Stavish and Julianna Ianni from Proscia</itunes:title>
      <itunes:episodeType>full</itunes:episodeType>
      <guid isPermaLink="false">e30de627-985b-41e6-80e2-f07ae1c73261</guid>
      <link>https://pixelscientia.com/podcast/data-driven-pathology-with-coleman-stavish-and-julianna-ianni-from-proscia/</link>
      <description>
        <![CDATA[<p>In this episode, I talk with Coleman Stavish and Julianna Ianni from Proscia about data-driven pathology. Coleman is the co-founder and CTO of Proscia and Julianna is the VP of AI Research &amp; Development. We discussed the importance of quality control systems in an ML pipeline, model generalizability, and how the regulatory process affects ML development.</p><p><br></p><p><strong>Quotes:</strong></p><p><br></p><p>“Better accuracy in diagnosis means less overdiagnosis and less underdiagnosis, which typically leads to better patient outcomes and quality of life.”</p><p><br></p><p>“Pathology is crucial in the drug development pipeline. It's helping pharmaceutical companies develop new treatments while assessing their safety and efficacy.”</p><p><br></p><p>“You'll often find slides that have been annotated with pen ink. That's something that can be quite common to do in some settings and that, if you're trying to train a diagnostic model, can really bias the model.”</p><p><br></p><p>“One of the heaviest impacts to development for us, just to give you an example, has been areas where we find a great level of disagreement in the ground truth data. So that will come out when you test, and we have to account for that disagreement during development.”</p><p><br></p><p>“It also requires thinking through, not just how are we going to validate, but then how are we going to keep tabs on the different deployments and ensure that we're not seeing performance degrade as maybe the data or the conditions within the laboratory change.”</p><p><br></p><p>“No matter how accurate or how valuable that information is that's produced by the model, if it's not actually introduced in the right way into the overall workflow, it's not going to be put into routine use.”</p><p><br></p><p>“Prepare to iterate. A solution that you build is probably not going to be the final destination, the final solution. 
And I think the fast pace of this field kind of demands some constant innovation.”</p><p><br></p><p>“I'd also say to heavily invest in your team. There's really nothing that replaces having good people and very skilled people working for you and building these AI products.”</p><p><br></p><p>“Something that we've learned ourselves is how to balance the investor pitch about AI and its potential with the near and immediate term. Smaller successes that build you a road to that more ambitious future.”</p><p><br></p><p>“They could have the ability to diagnose cases remotely without having and maybe assisting patients who are in far flung areas of the world that may not have access to subspecialty pathologist expertise.”</p><p><br></p><p>“Maybe it means someone gets the right diagnosis a little bit faster in aggregate. I think that could have a really big impact.”</p><p><br></p><p><strong>Links:</strong></p><p><a href="https://proscia.com/">Proscia</a></p><p><strong>Resources for Computer Vision Teams:</strong></p><p><a href="https://www.linkedin.com/in/hdcouture/">LinkedIn</a> – Connect with Heather.</p><p><a href="https://pixelscientia.com/newsletter/">Computer Vision Insights Newsletter</a> – A biweekly newsletter to help bring the latest machine learning and computer vision research to applications in people and planetary health.</p><p><a href="http://pixelscientia.com/services/strategy-session/">Computer Vision Strategy Session</a> – Not sure how to advance your computer vision project? Get unstuck with a clear set of next steps. Schedule a 1 hour strategy session now to advance your project.</p><p><a href="https://pixelscientia.com/services/advisory/">Computer Vision Advisory Services</a> – Monthly advisory services to help you strategically plan your CV/ML capabilities, reduce the trial-and-error of model development, and get to market faster.</p>]]>
      </description>
      <content:encoded>
        <![CDATA[<p>In this episode, I talk with Coleman Stavish and Julianna Ianni from Proscia about data-driven pathology. Coleman is the co-founder and CTO of Proscia and Julianna is the VP of AI Research &amp; Development. We discussed the importance of quality control systems in an ML pipeline, model generalizability, and how the regulatory process affects ML development.</p><p><br></p><p><strong>Quotes:</strong></p><p><br></p><p>“Better accuracy in diagnosis means less overdiagnosis and less under diagnosis, which typically leads to better patient outcomes and quality of life.”</p><p><br></p><p>“Pathology is crucial in the drug development pipeline. It's helping pharmaceutical companies develop new treatments while assessing their safety and efficacy.”</p><p><br></p><p>“You'll often find slides that have been annotated with pen ink. That's something that can be quite common to do in some settings and that, if you're trying to train a diagnostic model, can really bias the model.”</p><p><br></p><p>“One of the heaviest impacts to development for us, just to give you an example, has been areas where we find a great level of disagreement in the ground truth data. So that will come out when you test, and we have to account for that disagreement during development.”</p><p><br></p><p>“It also requires thinking through, not just how are we going to validate, but then how are we going to keep tabs on the different deployments and ensure that we're not seeing performance degrade as maybe the data or the conditions within the laboratory change.”</p><p><br></p><p>“No matter how accurate or how valuable that information is that's produced by the model, if it's not actually introduced in the right way into the overall workflow, it's not going to be put into routine use.”</p><p><br></p><p>“Prepare to iterate. A solution that you build is probably not going to be the final destination, the final solution. 
And I think the fast pace of this field kind of demands some constant innovation.”</p><p><br></p><p>“I'd also say to heavily invest in your team. There's really nothing that replaces having good people and very skilled people working for you and building these AI products.”</p><p><br></p><p>“Something that we've learned ourselves is how to balance the investor pitch about AI and its potential with the near and immediate term. Smaller successes that build you a road to that more ambitious future.”</p><p><br></p><p>“They could have the ability to diagnose cases remotely without having and maybe assisting patients who are in far flung areas of the world that may not have access to subspecialty pathologist expertise.”</p><p><br></p><p>“Maybe it means someone gets the right diagnosis a little bit faster in aggregate. I think that could have a really big impact.”</p><p><br></p><p><strong>Links:</strong></p><p><a href="https://proscia.com/">Proscia</a></p><p><strong>Resources for Computer Vision Teams:</strong></p><p><a href="https://www.linkedin.com/in/hdcouture/">LinkedIn</a> – Connect with Heather.</p><p><a href="https://pixelscientia.com/newsletter/">Computer Vision Insights Newsletter</a> – A biweekly newsletter to help bring the latest machine learning and computer vision research to applications in people and planetary health.</p><p><a href="http://pixelscientia.com/services/strategy-session/">Computer Vision Strategy Session</a> – Not sure how to advance your computer vision project? Get unstuck with a clear set of next steps. Schedule a 1 hour strategy session now to advance your project.</p><p><a href="https://pixelscientia.com/services/advisory/">Computer Vision Advisory Services</a> – Monthly advisory services to help you strategically plan your CV/ML capabilities, reduce the trial-and-error of model development, and get to market faster.</p>]]>
      </content:encoded>
      <pubDate>Mon, 28 Nov 2022 06:00:00 -0500</pubDate>
      <author>Heather D. Couture</author>
      <enclosure url="https://2.gum.fm/op3.dev/e/pdcn.co/e/pdst.fm/e/dts.podtrac.com/redirect.mp3/media.transistor.fm/5681abe8/712419b9.mp3" length="29352031" type="audio/mpeg"/>
      <itunes:author>Heather D. Couture</itunes:author>
      <itunes:image href="https://img.transistor.fm/pLnnrqHe1XJ3c74gvHuvlxLL5Aa7yRgAv3PnUgy1Scs/rs:fill:0:0:1/w:1400/h:1400/q:60/mb:500000/aHR0cHM6Ly9pbWct/dXBsb2FkLXByb2R1/Y3Rpb24udHJhbnNp/c3Rvci5mbS9lcGlz/b2RlLzExMDIxOTMv/MTY2ODg4MzgyMi1h/cnR3b3JrLmpwZw.jpg"/>
      <itunes:duration>1832</itunes:duration>
      <itunes:summary>I talk with Coleman Stavish and Julianna Ianni from Proscia about data-driven pathology. Coleman is the co-founder and CTO of Proscia and Julianna is the VP of AI Research &amp; Development. We discussed the importance of quality control systems in an ML pipeline, model generalizability, and how the regulatory process affects ML development.</itunes:summary>
      <itunes:subtitle>I talk with Coleman Stavish and Julianna Ianni from Proscia about data-driven pathology. Coleman is the co-founder and CTO of Proscia and Julianna is the VP of AI Research &amp; Development. We discussed the importance of quality control systems in an ML </itunes:subtitle>
      <itunes:keywords>AI, startups, business, machine learning, computer vision, deep learning, impact, sustainability, healthcare, medical</itunes:keywords>
      <itunes:explicit>No</itunes:explicit>
      <podcast:transcript url="https://share.transistor.fm/s/5681abe8/transcript.vtt" type="text/vtt" rel="captions"/>
    </item>
    <item>
      <title>Biophysical Modeling of Cancer with Joe Peterson from SimBioSys</title>
      <itunes:episode>8</itunes:episode>
      <podcast:episode>8</podcast:episode>
      <itunes:title>Biophysical Modeling of Cancer with Joe Peterson from SimBioSys</itunes:title>
      <itunes:episodeType>full</itunes:episodeType>
      <guid isPermaLink="false">15fd79e0-8158-4dd0-85d2-1c658cc963d2</guid>
      <link>https://pixelscientia.com/podcast/biophysical-modeling-of-cancer-with-joe-peterson-from-simbiosys/</link>
      <description>
        <![CDATA[<p>In this episode, I talk with Joe Peterson, co-founder and CTO of SimBioSys, about biophysical modeling of cancer. SimBioSys is trying to revolutionize precision cancer care through individualized treatment planning, accelerated drug development, clinical trial optimization, and comprehensive biomarker development. Joe and I talked about the challenges of working with heterogeneous forms of data and the ways bias can manifest when training models on medical data.</p><p><br></p><p><strong>Quotes:</strong></p><p>“We use AI or ML at effectively every point in the process, both in our clinical medical devices, but also for our internal R&amp;D.”</p><p><br></p><p>“Have you ever seen the way weather scientists simulate a hurricane? We do a very similar thing within the body, or if you've ever seen mechanical engineers simulate the combustion of a gas and a gas turbine, we do a similar type of thing within these patient models.”</p><p><br></p><p>“If you're able to distill the processes that go on biologically, chemically and physically to their essence, you can create building blocks that can be mixed and matched.”</p><p><br></p><p>“Our thought was, let's not ask the models to do too much. Let's ask them to do one thing that we need them to do very, very well. This allows us to have more collected data or more directed data collection, as well as more clearly defined goals in terms of business value and delivering business value to each of the models.”</p><p><br></p><p>“All these different types of data are much more heterogeneous. They come from many different scales. They come from many different sources. 
They're encoded in many different ways, and so there's a huge effort, on the research and development side, just to extract what's meaningful in those different types of data sets so that we can begin to define those biophysical building blocks that ultimately make it into the clinical application.”</p><p><br></p><p>“It's just really about capturing the variability and trying to drive out as much variability up front as you possibly can.”</p><p><br></p><p>“We also develop models that are generally capturing any sort of drift in the data over time.”</p><p><br></p><p>“You wanna understand outside of just a research setting, but out there in the wild how well your models are going to work, how often you're going to return a null result or an inconclusive result to a physician and being able to track that over time is really important from a quality control standpoint.”</p><p><br></p><p>“It's all the quality control machine learning models and deep learning models that make up the bulk of those internally.”</p><p><br></p><p>“Our responsibility as practitioners of AI is to not only identify and understand that bias, that historical bias, but also try to account for it as best we can.”</p><p><br></p><p>“What we need to assess when developing drugs or algorithms or devices is how they were trained, how they were tested, and really stratify those patient populations as best we can to sort of understand, at the very least, how they're behaving.”</p><p><br></p><p>“We've spent a lot of time trying to account for that variability as best we can. 
That said, we don't have a perfect data set and we're constantly thinking about ways to improve it.”</p><p><br></p><p>“I think what it comes down to is being open and transparent and really looking at the data that you have at the end of the day, If doctors are going to trust medical devices and if they're going to trust AI, they need to have information about.”</p><p><br></p><p>“By looking into and stratifying the patient populations in that way we can better understand where we need to targetedly spend resources to collect potentially more data to better understand the performance in those places or to improve our algorithms.”</p><p><br></p><p>“Adopt good machine learning practices early, just like good clinical practice or good manufacturing practices that are standards that are now being drafted and adopted.”</p><p><br></p><p>“Find the right partners to sort of drive the questions that you're addressing and ultimately the clinical actions that you're trying to address.”</p><p><br></p><p>“Models that are built to do a single task excellently well is a better approach than trying to build a model that does four or five tasks really well.”</p><p><br></p><p><strong>Links:</strong></p><p><a href="https://www.simbiosys.com/">SimBioSys</a></p><p><strong>Resources for Computer Vision Teams:</strong></p><p><a href="https://www.linkedin.com/in/hdcouture/">LinkedIn</a> – Connect with Heather.</p><p><a href="https://pixelscientia.com/newsletter/">Computer Vision Insights Newsletter</a> – A biweekly newsletter to help bring the latest machine learning and computer vision research to applications in people and planetary health.</p><p><a href="http://pixelscientia.com/services/strategy-session/">Computer Vision Strategy Session</a> – Not sure how to advance your computer vision project? Get unstuck with a clear set of next steps. 
Schedule a 1 hour strategy session now to advance your project.</p><p><a href="https://pixelscientia.com/services/advisory/">Computer Vision Advisory Services</a> – Monthly advisory services to help you strategically plan your CV/ML capabilities, reduce the trial-and-error of model development, and get to market faster.</p>]]>
      </description>
      <content:encoded>
        <![CDATA[<p>In this episode, I talk with Joe Peterson, co-founder and CTO of SimBioSys, about biophysical modeling of cancer. SimBioSys is trying to revolutionize precision cancer care through individualized treatment planning, accelerated drug development, clinical trial optimization, and comprehensive biomarker development. Joe and I talked about the challenges of working with heterogeneous forms of data and the ways bias can manifest when training models on medical data.</p><p><br></p><p><strong>Quotes:</strong></p><p>“We use AI or ML at effectively every point in the process, both in our clinical medical devices, but also for our internal R&amp;D.”</p><p><br></p><p>“Have you ever seen the way weather scientists simulate a hurricane? We do a very similar thing within the body, or if you've ever seen mechanical engineers simulate the combustion of a gas and a gas turbine, we do a similar type of thing within these patient models.”</p><p><br></p><p>“If you're able to distill the processes that go on biologically, chemically and physically to their essence, you can create building blocks that can be mixed and matched.”</p><p><br></p><p>“Our thought was, let's not ask the models to do too much. Let's ask them to do one thing that we need them to do very, very well. This allows us to have more collected data or more directed data collection, as well as more clearly defined goals in terms of business value and delivering business value to each of the models.”</p><p><br></p><p>“All these different types of data are much more heterogeneous. They come from many different scales. They come from many different sources. 
They're encoded in many different ways, and so there's a huge effort, on the research and development side, just to extract what's meaningful in those different types of data sets so that we can begin to define those biophysical building blocks that ultimately make it into the clinical application.”</p><p><br></p><p>“It's just really about capturing the variability and trying to drive out as much variability up front as you possibly can.”</p><p><br></p><p>“We also develop models that are generally capturing any sort of drift in the data over time.”</p><p><br></p><p>“You wanna understand outside of just a research setting, but out there in the wild how well your models are going to work, how often you're going to return a null result or an inconclusive result to a physician and being able to track that over time is really important from a quality control standpoint.”</p><p><br></p><p>“It's all the quality control machine learning models and deep learning models that make up the bulk of those internally.”</p><p><br></p><p>“Our responsibility as practitioners of AI is to not only identify and understand that bias, that historical bias, but also try to account for it as best we can.”</p><p><br></p><p>“What we need to assess when developing drugs or algorithms or devices is how they were trained, how they were tested, and really stratify those patient populations as best we can to sort of understand, at the very least, how they're behaving.”</p><p><br></p><p>“We've spent a lot of time trying to account for that variability as best we can. 
That said, we don't have a perfect data set and we're constantly thinking about ways to improve it.”</p><p><br></p><p>“I think what it comes down to is being open and transparent and really looking at the data that you have at the end of the day, If doctors are going to trust medical devices and if they're going to trust AI, they need to have information about.”</p><p><br></p><p>“By looking into and stratifying the patient populations in that way we can better understand where we need to targetedly spend resources to collect potentially more data to better understand the performance in those places or to improve our algorithms.”</p><p><br></p><p>“Adopt good machine learning practices early, just like good clinical practice or good manufacturing practices that are standards that are now being drafted and adopted.”</p><p><br></p><p>“Find the right partners to sort of drive the questions that you're addressing and ultimately the clinical actions that you're trying to address.”</p><p><br></p><p>“Models that are built to do a single task excellently well is a better approach than trying to build a model that does four or five tasks really well.”</p><p><br></p><p><strong>Links:</strong></p><p><a href="https://www.simbiosys.com/">SimBioSys</a></p><p><strong>Resources for Computer Vision Teams:</strong></p><p><a href="https://www.linkedin.com/in/hdcouture/">LinkedIn</a> – Connect with Heather.</p><p><a href="https://pixelscientia.com/newsletter/">Computer Vision Insights Newsletter</a> – A biweekly newsletter to help bring the latest machine learning and computer vision research to applications in people and planetary health.</p><p><a href="http://pixelscientia.com/services/strategy-session/">Computer Vision Strategy Session</a> – Not sure how to advance your computer vision project? Get unstuck with a clear set of next steps. 
Schedule a 1 hour strategy session now to advance your project.</p><p><a href="https://pixelscientia.com/services/advisory/">Computer Vision Advisory Services</a> – Monthly advisory services to help you strategically plan your CV/ML capabilities, reduce the trial-and-error of model development, and get to market faster.</p>]]>
      </content:encoded>
      <pubDate>Mon, 14 Nov 2022 06:00:00 -0500</pubDate>
      <author>Heather D. Couture</author>
      <enclosure url="https://2.gum.fm/op3.dev/e/pdcn.co/e/pdst.fm/e/dts.podtrac.com/redirect.mp3/media.transistor.fm/280b44b8/621e267c.mp3" length="48322391" type="audio/mpeg"/>
      <itunes:author>Heather D. Couture</itunes:author>
      <itunes:image href="https://img.transistor.fm/Jtdc1N_ZK-q6l76yVLdtfAFX_kXNcyAPmSH6lIHrnUY/rs:fill:0:0:1/w:1400/h:1400/q:60/mb:500000/aHR0cHM6Ly9pbWct/dXBsb2FkLXByb2R1/Y3Rpb24udHJhbnNp/c3Rvci5mbS9lcGlz/b2RlLzEwOTUwNDkv/MTY2ODI4NzcxMi1h/cnR3b3JrLmpwZw.jpg"/>
      <itunes:duration>3013</itunes:duration>
      <itunes:summary>I talk with Joe Peterson, co-founder and CTO of SimBioSys, about biophysical modeling of cancer. SimBioSys is trying to revolutionize precision cancer care through individualized treatment planning, accelerated drug development, clinical trial optimization, and comprehensive biomarker development.</itunes:summary>
      <itunes:subtitle>I talk with Joe Peterson, co-founder and CTO of SimBioSys, about biophysical modeling of cancer. SimBioSys is trying to revolutionize precision cancer care through individualized treatment planning, accelerated drug development, clinical trial optimizatio</itunes:subtitle>
      <itunes:keywords>AI, startups, business, machine learning, computer vision, deep learning, impact, sustainability, healthcare, medical</itunes:keywords>
      <itunes:explicit>No</itunes:explicit>
    </item>
    <item>
      <title>Smarter Farming with Eric Adamson from Tortuga AgTech</title>
      <itunes:episode>7</itunes:episode>
      <podcast:episode>7</podcast:episode>
      <itunes:title>Smarter Farming with Eric Adamson from Tortuga AgTech</itunes:title>
      <itunes:episodeType>full</itunes:episodeType>
      <guid isPermaLink="false">7dad1403-a3ec-470a-8d2a-91fb8d8765ed</guid>
      <link>https://pixelscientia.com/podcast/smarter-farming-with-eric-adamson-from-tortuga-agtech/</link>
      <description>
        <![CDATA[<p>In this episode, I talk with Eric Adamson, CEO of Tortuga AgTech, about smarter farming. Tortuga AgTech builds robots for harvesting fruit and vegetables to help farms be more resilient, sustainable, and successful.</p><p><br></p><p><strong>Quotes</strong>:</p><p><br></p><p>“Figuring out that pipeline from someone else's knowledge to the robot knows it is really critical.”</p><p><br></p><p>“If you build technology because the technology is cool or because you can, you are much more likely to fail than if you start with the customer problem and then figure out what kind of technology might help to solve that problem.”</p><p><br></p><p>“That learning happens with our machine learning engineers being in the field, being the ones who are actually taking data with handheld rigs.”</p><p><br></p><p>“Many of our team members’ first two weeks have been immediately flying to a farm and spending time on the farm with the robots, learning a problem in very, very deep detail. And I would encourage anybody building a technology based on machine learning or certainly robots to do the same.”</p><p><br></p><p>“We have a very efficient and effective pipeline that took us years to build. But it's exceptionally powerful for us to be able to, for example, go to a new site, run a couple robots or a small fleet of robots for a day, and then within a week have a brand new model that's been completely retrained on freshly labeled data from this new place.”</p><p><br></p><p>“That’s very critical for us because farm environments are changing so often. You really need to be able to be reactive and continue to improve your models as you develop.”</p><p><br></p><p>“We measure our scores based on golden data sets that we've sort of hand labeled ourselves. 
But we also have to make some judgment calls about what we really want in our performance versus what the conditions are in the field and what we're seeing on the farm.”</p><p><br></p><p>“We try to convert whatever model results are spit out into language that the customer intuitively understands.”</p><p><br></p><p>“It's really important to start with the customer problem and to start with the customer problem as an economic proposition.”</p><p><br></p><p>“There are already very large discussions happening in the farming community around what type of farming should be used in order to, for example, deal with climate change, to deal with drought, to deal with chemical regulations, to deal with a lowering of fruit quality and an increasing of fruit waste, the challenging labor environments.”</p><p><br></p><p><strong>Links</strong>:</p><p><a href="https://www.tortugaagtech.com/">Tortuga AgTech</a></p><p><a href="https://twitter.com/TortugaAgTech">Twitter</a></p><p><a href="https://www.youtube.com/channel/UCME87CYIK4iaJG8i9MlLfDw/videos">YouTube</a></p><p><strong>Resources for Computer Vision Teams:</strong></p><p><a href="https://www.linkedin.com/in/hdcouture/">LinkedIn</a> – Connect with Heather.</p><p><a href="https://pixelscientia.com/newsletter/">Computer Vision Insights Newsletter</a> – A biweekly newsletter to help bring the latest machine learning and computer vision research to applications in people and planetary health.</p><p><a href="http://pixelscientia.com/services/strategy-session/">Computer Vision Strategy Session</a> – Not sure how to advance your computer vision project? Get unstuck with a clear set of next steps. Schedule a 1 hour strategy session now to advance your project.</p><p><a href="https://pixelscientia.com/services/advisory/">Computer Vision Advisory Services</a> – Monthly advisory services to help you strategically plan your CV/ML capabilities, reduce the trial-and-error of model development, and get to market faster.</p>]]>
      </description>
      <content:encoded>
        <![CDATA[<p>In this episode, I talk with Eric Adamson, CEO of Tortuga AgTech, about smarter farming. Tortuga AgTech builds robots for harvesting fruit and vegetables to help farms be more resilient, sustainable, and successful.</p><p><br></p><p><strong>Quotes</strong>:</p><p><br></p><p>“Figuring out that pipeline from someone else's knowledge to the robot knows it is really critical.”</p><p><br></p><p>“If you build technology because the technology is cool or because you can, you are much more likely to fail than if you start with the customer problem and then figure out what kind of technology might help to solve that problem.”</p><p><br></p><p>“That learning happens with our machine learning engineers being in the field, being the ones who are actually taking data with handheld rigs.”</p><p><br></p><p>“Many of our team members’ first two weeks have been immediately flying to a farm and spending time on the farm with the robots, learning a problem in very, very deep detail. And I would encourage anybody building a technology based on machine learning or certainly robots to do the same.”</p><p><br></p><p>“We have a very efficient and effective pipeline that took us years to build. But it's exceptionally powerful for us to be able to, for example, go to a new site, run a couple robots or a small fleet of robots for a day, and then within a week have a brand new model that's been completely retrained on freshly labeled data from this new place.”</p><p><br></p><p>“That’s very critical for us because farm environments are changing so often. You really need to be able to be reactive and continue to improve your models as you develop.”</p><p><br></p><p>“We measure our scores based on golden data sets that we've sort of hand labeled ourselves. 
But we also have to make some judgment calls about what we really want in our performance versus what the conditions are in the field and what we're seeing on the farm.”</p><p><br></p><p>“We try to convert whatever model results are spit out into language that the customer intuitively understands.”</p><p><br></p><p>“It's really important to start with the customer problem and to start with the customer problem as an economic proposition.”</p><p><br></p><p>“There are already very large discussions happening in the farming community around what type of farming should be used in order to, for example, deal with climate change, to deal with drought, to deal with chemical regulations, to deal with a lowering of fruit quality and an increasing of fruit waste, the challenging labor environments.”</p><p><br></p><p><strong>Links</strong>:</p><p><a href="https://www.tortugaagtech.com/">Tortuga AgTech</a></p><p><a href="https://twitter.com/TortugaAgTech">Twitter</a></p><p><a href="https://www.youtube.com/channel/UCME87CYIK4iaJG8i9MlLfDw/videos">YouTube</a></p><p><strong>Resources for Computer Vision Teams:</strong></p><p><a href="https://www.linkedin.com/in/hdcouture/">LinkedIn</a> – Connect with Heather.</p><p><a href="https://pixelscientia.com/newsletter/">Computer Vision Insights Newsletter</a> – A biweekly newsletter to help bring the latest machine learning and computer vision research to applications in people and planetary health.</p><p><a href="http://pixelscientia.com/services/strategy-session/">Computer Vision Strategy Session</a> – Not sure how to advance your computer vision project? Get unstuck with a clear set of next steps. Schedule a 1 hour strategy session now to advance your project.</p><p><a href="https://pixelscientia.com/services/advisory/">Computer Vision Advisory Services</a> – Monthly advisory services to help you strategically plan your CV/ML capabilities, reduce the trial-and-error of model development, and get to market faster.</p>]]>
      </content:encoded>
      <pubDate>Mon, 07 Nov 2022 06:00:00 -0500</pubDate>
      <author>Heather D. Couture</author>
      <enclosure url="https://2.gum.fm/op3.dev/e/pdcn.co/e/pdst.fm/e/dts.podtrac.com/redirect.mp3/media.transistor.fm/cd15725d/f78a57de.mp3" length="26122421" type="audio/mpeg"/>
      <itunes:author>Heather D. Couture</itunes:author>
      <itunes:image href="https://img.transistor.fm/KlDe8gDIFecXbb2wssvrHNZ_XdWdjwLtpG37Q2zD5DE/rs:fill:0:0:1/w:1400/h:1400/q:60/mb:500000/aHR0cHM6Ly9pbWct/dXBsb2FkLXByb2R1/Y3Rpb24udHJhbnNp/c3Rvci5mbS9lcGlz/b2RlLzEwODA0NjMv/MTY2NzAwOTA4Mi1h/cnR3b3JrLmpwZw.jpg"/>
      <itunes:duration>1631</itunes:duration>
      <itunes:summary>Eric Adamson, CEO of Tortuga AgTech, about smarter farming. Tortuga AgTech builds robots for harvesting fruit and vegetables to help farms be more resilient, sustainable, and successful.</itunes:summary>
      <itunes:subtitle>Eric Adamson, CEO of Tortuga AgTech, about smarter farming. Tortuga AgTech builds robots for harvesting fruit and vegetables to help farms be more resilient, sustainable, and successful.</itunes:subtitle>
      <itunes:keywords>AI, startups, business, machine learning, computer vision, deep learning, impact, sustainability, healthcare, medical</itunes:keywords>
      <itunes:explicit>No</itunes:explicit>
    </item>
    <item>
      <title>Autonomous Diagnostics with John Bertrand from Digital Diagnostics</title>
      <itunes:episode>6</itunes:episode>
      <podcast:episode>6</podcast:episode>
      <itunes:title>Autonomous Diagnostics with John Bertrand from Digital Diagnostics</itunes:title>
      <itunes:episodeType>full</itunes:episodeType>
      <guid isPermaLink="false">6a040834-53b8-4b65-a370-32e68f63ffd7</guid>
      <link>https://pixelscientia.com/podcast/autonomous-diagnostics-with-john-bertrand-from-digital-diagnostics/</link>
      <description>
        <![CDATA[<p>In this episode, I talk with John Bertrand, CEO of Digital Diagnostics, about autonomous diagnostics. Digital Diagnostics transforms the quality, accessibility, equity, and affordability of healthcare with AI-powered diagnostics. They developed the first FDA-cleared autonomous AI system.</p><p><br></p><p><strong>Quotes</strong>:</p><p>“So we look for diagnostics where there's an established understanding of what the disease is and there's a gold standard as to how to measure that.”</p><p><br></p><p>“We'll naturally start with an area where positive and negative is a very binary decision that is almost mathematically derived.”</p><p><br></p><p>“It goes back to picking the right types of disease states to make sure that the gold standard already exists.”</p><p><br></p><p>“How do you take images that have different coverage of the retina but make sure that you piece them together in a way that the processing part of the system is getting a consistent image that they're looking at every single time so that the algorithm remains consistent and we don't have to have different algorithms per vendor that we're interacting with.”</p><p><br></p><p>“We’re pretty proud of the fact we’ve been able to do that first kind of assistive feedback for the provider.”</p><p><br></p><p>“We want every single patient, regardless of their background, to receive consistent quality of diagnostic output. What that means is that we actually have to build our training data sets as well as our clinical validation studies and trials to take into account a diverse population set.”</p><p><br></p><p>“Continuous learning versus locked algorithms is another key factor. . . 
Would you really want that algorithm to adjust to the most recent data it's seeing, thinking it's attempting to become more accurate, when in fact it's really more optimizing for the ethnicity of the folks in that particular region, the sun rises on the east coast to the United States, everybody further east goes to bed. Now the algorithm’s been indexed towards another group from a ethnicity perspective, that’s no longer representative of where the testing’s being done as the sun rises in New York.”</p><p><br></p><p>“How do we ensure that we create confidence with regulators, with providers, and with patients that we've actually thought through this?”</p><p><br></p><p>“We can literally break down for you what the computer saw, why graded it out what it did, and why it gave you the results it did.”</p><p><br></p><p>“Your algorithm should be explainable so people trust the technology, understand how it works.”</p><p><br></p><p>“Also explainability helps you drive better accuracy and that you understand why you're getting the result that you're getting with the black box approach.”</p><p><br></p><p>“You really want to work within the healthcare system when you’re building these types of businesses.”</p><p><br></p><p>“If you're going to chart that course and really carry through to fruition, your vision of building an algorithm that impacts patient lives, I think you really need to center the culture of the business around a commonly shared vision for the mission of what you're trying to do.”</p><p><br></p><p><strong>Links</strong>:</p><p><a href="https://www.digitaldiagnostics.com/">Digital Diagnostics</a></p><p><strong>Resources for Computer Vision Teams:</strong></p><p><a href="https://www.linkedin.com/in/hdcouture/">LinkedIn</a> – Connect with Heather.</p><p><a href="https://pixelscientia.com/newsletter/">Computer Vision Insights Newsletter</a> – A biweekly newsletter to help bring the latest machine learning and computer vision research to applications in people 
and planetary health.</p><p><a href="http://pixelscientia.com/services/strategy-session/">Computer Vision Strategy Session</a> – Not sure how to advance your computer vision project? Get unstuck with a clear set of next steps. Schedule a 1 hour strategy session now to advance your project.</p><p><a href="https://pixelscientia.com/services/advisory/">Computer Vision Advisory Services</a> – Monthly advisory services to help you strategically plan your CV/ML capabilities, reduce the trial-and-error of model development, and get to market faster.</p>]]>
      </description>
      <content:encoded>
        <![CDATA[<p>In this episode, I talk with John Bertrand, CEO of Digital Diagnostics, about autonomous diagnostics. Digital Diagnostics transforms the quality, accessibility, equity, and affordability of healthcare with AI-powered diagnostics. They developed the first FDA-cleared autonomous AI system.</p><p><br></p><p><strong>Quotes</strong>:</p><p>“So we look for diagnostics where there's an established understanding of what the disease is and there's a gold standard as to how to measure that.”</p><p><br></p><p>“We'll naturally start with an area where positive and negative is a very binary decision that is almost mathematically derived.”</p><p><br></p><p>“It goes back to picking the right types of disease states to make sure that the gold standard already exists.”</p><p><br></p><p>“How do you take images that have different coverage of the retina but make sure that you piece them together in a way that the processing part of the system is getting a consistent image that they're looking at every single time so that the algorithm remains consistent and we don't have to have different algorithms per vendor that we're interacting with.”</p><p><br></p><p>“We’re pretty proud of the fact we’ve been able to do that first kind of assistive feedback for the provider.”</p><p><br></p><p>“We want every single patient, regardless of their background, to receive consistent quality of diagnostic output. What that means is that we actually have to build our training data sets as well as our clinical validation studies and trials to take into account a diverse population set.”</p><p><br></p><p>“Continuous learning versus locked algorithms is another key factor. . . 
Would you really want that algorithm to adjust to the most recent data it's seeing, thinking it's attempting to become more accurate, when in fact it's really more optimizing for the ethnicity of the folks in that particular region, the sun rises on the east coast to the United States, everybody further east goes to bed. Now the algorithm’s been indexed towards another group from a ethnicity perspective, that’s no longer representative of where the testing’s being done as the sun rises in New York.”</p><p><br></p><p>“How do we ensure that we create confidence with regulators, with providers, and with patients that we've actually thought through this?”</p><p><br></p><p>“We can literally break down for you what the computer saw, why graded it out what it did, and why it gave you the results it did.”</p><p><br></p><p>“Your algorithm should be explainable so people trust the technology, understand how it works.”</p><p><br></p><p>“Also explainability helps you drive better accuracy and that you understand why you're getting the result that you're getting with the black box approach.”</p><p><br></p><p>“You really want to work within the healthcare system when you’re building these types of businesses.”</p><p><br></p><p>“If you're going to chart that course and really carry through to fruition, your vision of building an algorithm that impacts patient lives, I think you really need to center the culture of the business around a commonly shared vision for the mission of what you're trying to do.”</p><p><br></p><p><strong>Links</strong>:</p><p><a href="https://www.digitaldiagnostics.com/">Digital Diagnostics</a></p><p><strong>Resources for Computer Vision Teams:</strong></p><p><a href="https://www.linkedin.com/in/hdcouture/">LinkedIn</a> – Connect with Heather.</p><p><a href="https://pixelscientia.com/newsletter/">Computer Vision Insights Newsletter</a> – A biweekly newsletter to help bring the latest machine learning and computer vision research to applications in people 
and planetary health.</p><p><a href="http://pixelscientia.com/services/strategy-session/">Computer Vision Strategy Session</a> – Not sure how to advance your computer vision project? Get unstuck with a clear set of next steps. Schedule a 1 hour strategy session now to advance your project.</p><p><a href="https://pixelscientia.com/services/advisory/">Computer Vision Advisory Services</a> – Monthly advisory services to help you strategically plan your CV/ML capabilities, reduce the trial-and-error of model development, and get to market faster.</p>]]>
      </content:encoded>
      <pubDate>Mon, 31 Oct 2022 06:00:00 -0400</pubDate>
      <author>Heather D. Couture</author>
      <enclosure url="https://2.gum.fm/op3.dev/e/pdcn.co/e/pdst.fm/e/dts.podtrac.com/redirect.mp3/media.transistor.fm/f564aa06/787790fc.mp3" length="20713195" type="audio/mpeg"/>
      <itunes:author>Heather D. Couture</itunes:author>
      <itunes:image href="https://img.transistor.fm/1zhUlVuIeZZBuw-u_dG-3B6ilumZpMJ4D1k-6-lO4wI/rs:fill:0:0:1/w:1400/h:1400/q:60/mb:500000/aHR0cHM6Ly9pbWct/dXBsb2FkLXByb2R1/Y3Rpb24udHJhbnNp/c3Rvci5mbS9lcGlz/b2RlLzEwODAyNzQv/MTY2Njk4OTAyMS1h/cnR3b3JrLmpwZw.jpg"/>
      <itunes:duration>1293</itunes:duration>
      <itunes:summary>John Bertrand, CEO of Digital Diagnostics, about autonomous diagnostics. Digital Diagnostics transforms the quality, accessibility, equity, and affordability of healthcare with AI-powered diagnostics. They developed the first FDA-cleared autonomous AI system.</itunes:summary>
      <itunes:subtitle>John Bertrand, CEO of Digital Diagnostics, about autonomous diagnostics. Digital Diagnostics transforms the quality, accessibility, equity, and affordability of healthcare with AI-powered diagnostics. They developed the first FDA-cleared autonomous AI sys</itunes:subtitle>
      <itunes:keywords>AI, startups, business, machine learning, computer vision, deep learning, impact, sustainability, healthcare, medical</itunes:keywords>
      <itunes:explicit>No</itunes:explicit>
    </item>
    <item>
      <title>Capturing the Carbon Fingerprint of Soil with David Schurman from Perennial</title>
      <itunes:episode>5</itunes:episode>
      <podcast:episode>5</podcast:episode>
      <itunes:title>Capturing the Carbon Fingerprint of Soil with David Schurman from Perennial</itunes:title>
      <itunes:episodeType>full</itunes:episodeType>
      <guid isPermaLink="false">f5e86896-aa1f-4cf6-a1b7-296d24049346</guid>
      <link>https://pixelscientia.com/podcast/capturing-the-carbon-fingerprint-of-soil-with-david-schurman-from-perennial/</link>
      <description>
        <![CDATA[<p>In this episode, I talk with David Schurman, co-founder and CTO of Perennial, about their verification platform for climate-smart agriculture. Perennial uses geospatial data and machine learning to unlock agricultural soils as the world’s largest carbon sink.</p><p><br></p><p><strong>Highlights:</strong></p><ul><li>How Perennial gathers and annotates training data from satellites and ground-based observations.</li><li>Handling variations across satellites and geographic locations.</li><li>Stratifying training data across the kinds of variables that matter.</li><li>Collaboration between machine learning engineers, remote sensing scientists, and crop scientists.</li><li>The importance of gathering more training data than you think you’ll need.</li><li>Respecting the data.</li><li>The nuance of communicating performance metrics.</li></ul><p><br></p><p><strong>Links:</strong></p><p><a href="https://www.perennial.earth/">Perennial’s website</a></p><p><a href="https://www.linkedin.com/company/perennial-earth/">Perennial on LinkedIn</a></p><p><a href="https://www.linkedin.com/in/dschurman/">David Schurman on LinkedIn</a></p><p><strong>Resources for Computer Vision Teams:</strong></p><p><a href="https://www.linkedin.com/in/hdcouture/">LinkedIn</a> – Connect with Heather.</p><p><a href="https://pixelscientia.com/newsletter/">Computer Vision Insights Newsletter</a> – A biweekly newsletter to help bring the latest machine learning and computer vision research to applications in people and planetary health.</p><p><a href="http://pixelscientia.com/services/strategy-session/">Computer Vision Strategy Session</a> – Not sure how to advance your computer vision project? Get unstuck with a clear set of next steps. 
Schedule a 1 hour strategy session now to advance your project.</p><p><a href="https://pixelscientia.com/services/advisory/">Computer Vision Advisory Services</a> – Monthly advisory services to help you strategically plan your CV/ML capabilities, reduce the trial-and-error of model development, and get to market faster.</p>]]>
      </description>
      <content:encoded>
        <![CDATA[<p>In this episode, I talk with David Schurman, co-founder and CTO of Perennial, about their verification platform for climate-smart agriculture. Perennial uses geospatial data and machine learning to unlock agricultural soils as the world’s largest carbon sink.</p><p><br></p><p><strong>Highlights:</strong></p><ul><li>How Perennial gathers and annotates training data from satellites and ground-based observations.</li><li>Handling variations across satellites and geographic locations.</li><li>Stratifying training data across the kinds of variables that matter.</li><li>Collaboration between machine learning engineers, remote sensing scientists, and crop scientists.</li><li>The importance of gathering more training data than you think you’ll need.</li><li>Respecting the data.</li><li>The nuance of communicating performance metrics.</li></ul><p><br></p><p><strong>Links:</strong></p><p><a href="https://www.perennial.earth/">Perennial’s website</a></p><p><a href="https://www.linkedin.com/company/perennial-earth/">Perennial on LinkedIn</a></p><p><a href="https://www.linkedin.com/in/dschurman/">David Schurman on LinkedIn</a></p><p><strong>Resources for Computer Vision Teams:</strong></p><p><a href="https://www.linkedin.com/in/hdcouture/">LinkedIn</a> – Connect with Heather.</p><p><a href="https://pixelscientia.com/newsletter/">Computer Vision Insights Newsletter</a> – A biweekly newsletter to help bring the latest machine learning and computer vision research to applications in people and planetary health.</p><p><a href="http://pixelscientia.com/services/strategy-session/">Computer Vision Strategy Session</a> – Not sure how to advance your computer vision project? Get unstuck with a clear set of next steps. 
Schedule a 1 hour strategy session now to advance your project.</p><p><a href="https://pixelscientia.com/services/advisory/">Computer Vision Advisory Services</a> – Monthly advisory services to help you strategically plan your CV/ML capabilities, reduce the trial-and-error of model development, and get to market faster.</p>]]>
      </content:encoded>
      <pubDate>Mon, 24 Oct 2022 06:00:00 -0400</pubDate>
      <author>Heather D. Couture</author>
      <enclosure url="https://2.gum.fm/op3.dev/e/pdcn.co/e/pdst.fm/e/dts.podtrac.com/redirect.mp3/media.transistor.fm/db48aa90/b43bf9dd.mp3" length="22407297" type="audio/mpeg"/>
      <itunes:author>Heather D. Couture</itunes:author>
      <itunes:image href="https://img.transistor.fm/TzD-ACqmuZSLalD9OWilrlJvIxcXViaEf0ireMMDYAk/rs:fill:0:0:1/w:1400/h:1400/q:60/mb:500000/aHR0cHM6Ly9pbWct/dXBsb2FkLXByb2R1/Y3Rpb24udHJhbnNp/c3Rvci5mbS9lcGlz/b2RlLzEwNTQ4OTIv/MTY2NTA4MTA2MS1h/cnR3b3JrLmpwZw.jpg"/>
      <itunes:duration>1398</itunes:duration>
      <itunes:summary>David Schurman, co-founder and CTO of Perennial, about their verification platform for climate-smart agriculture. Perennial uses geospatial data and machine learning to unlock agricultural soils as the world’s largest carbon sink.</itunes:summary>
      <itunes:subtitle>David Schurman, co-founder and CTO of Perennial, about their verification platform for climate-smart agriculture. Perennial uses geospatial data and machine learning to unlock agricultural soils as the world’s largest carbon sink.</itunes:subtitle>
      <itunes:keywords>AI, startups, business, machine learning, computer vision, deep learning, impact, sustainability, healthcare, medical</itunes:keywords>
      <itunes:explicit>No</itunes:explicit>
    </item>
    <item>
      <title>Biomarker Discovery from Pathology Images with Matt Alderdice from Sonrai Analytics</title>
      <itunes:episode>4</itunes:episode>
      <podcast:episode>4</podcast:episode>
      <itunes:title>Biomarker Discovery from Pathology Images with Matt Alderdice from Sonrai Analytics</itunes:title>
      <itunes:episodeType>full</itunes:episodeType>
      <guid isPermaLink="false">ec68d217-fc31-4443-aa42-32992e81c8a3</guid>
      <link>https://pixelscientia.com/podcast/biomarker-discovery-from-pathology-images-with-matt-alderdice-from-sonrai-analytics/</link>
      <description>
        <![CDATA[<p>In this episode, I talk with Matt Alderdice, Head of Data Science at Sonrai Analytics, about precision medicine. Sonrai Analytics automates laborious data processes and speeds up new drug and healthcare developments.</p><p><br></p><p><strong>Highlights:</strong></p><ul><li>Machine learning for automating time-consuming and tedious analysis of microscopy images.</li><li>Training for machine learning practitioners new to pathology by integrating domain experts with your team.</li><li>Involving stakeholders throughout a project.</li><li>Literature reviews to search for associated publications and potential solutions to avoid overly complicated solutions.</li><li>Validating models with ethnically diverse datasets.</li><li>Analytical validation for differing stains, scanners, and operators.</li><li>Clinical validation on a held out dataset in the same environment as would be in the clinic.</li><li>Identifying relevant metrics from conversations with pathologists, oncologists, nurses, and patients.</li><li>Focus on the problem you’re trying to solve – AI is just a tool.</li></ul><p><br></p><p><strong>Links:</strong></p><p><a href="https://sonraianalytics.com/">Sonrai Analytics’ website</a></p><p><a href="https://www.linkedin.com/in/matthew-alderdice-6b401a118/">Matt Alderdice on LinkedIn</a></p><p><strong>Resources for Computer Vision Teams:</strong></p><p><a href="https://www.linkedin.com/in/hdcouture/">LinkedIn</a> – Connect with Heather.</p><p><a href="https://pixelscientia.com/newsletter/">Computer Vision Insights Newsletter</a> – A biweekly newsletter to help bring the latest machine learning and computer vision research to applications in people and planetary health.</p><p><a href="http://pixelscientia.com/services/strategy-session/">Computer Vision Strategy Session</a> – Not sure how to advance your computer vision project? Get unstuck with a clear set of next steps. 
Schedule a 1 hour strategy session now to advance your project.</p><p><a href="https://pixelscientia.com/services/advisory/">Computer Vision Advisory Services</a> – Monthly advisory services to help you strategically plan your CV/ML capabilities, reduce the trial-and-error of model development, and get to market faster.</p>]]>
      </description>
      <content:encoded>
        <![CDATA[<p>In this episode, I talk with Matt Alderdice, Head of Data Science at Sonrai Analytics, about precision medicine. Sonrai Analytics automates laborious data processes and speeds up new drug and healthcare developments.</p><p><br></p><p><strong>Highlights:</strong></p><ul><li>Machine learning for automating time-consuming and tedious analysis of microscopy images.</li><li>Training for machine learning practitioners new to pathology by integrating domain experts with your team.</li><li>Involving stakeholders throughout a project.</li><li>Literature reviews to search for associated publications and potential solutions to avoid overly complicated solutions.</li><li>Validating models with ethnically diverse datasets.</li><li>Analytical validation for differing stains, scanners, and operators.</li><li>Clinical validation on a held out dataset in the same environment as would be in the clinic.</li><li>Identifying relevant metrics from conversations with pathologists, oncologists, nurses, and patients.</li><li>Focus on the problem you’re trying to solve – AI is just a tool.</li></ul><p><br></p><p><strong>Links:</strong></p><p><a href="https://sonraianalytics.com/">Sonrai Analytics’ website</a></p><p><a href="https://www.linkedin.com/in/matthew-alderdice-6b401a118/">Matt Alderdice on LinkedIn</a></p><p><strong>Resources for Computer Vision Teams:</strong></p><p><a href="https://www.linkedin.com/in/hdcouture/">LinkedIn</a> – Connect with Heather.</p><p><a href="https://pixelscientia.com/newsletter/">Computer Vision Insights Newsletter</a> – A biweekly newsletter to help bring the latest machine learning and computer vision research to applications in people and planetary health.</p><p><a href="http://pixelscientia.com/services/strategy-session/">Computer Vision Strategy Session</a> – Not sure how to advance your computer vision project? Get unstuck with a clear set of next steps. 
Schedule a 1 hour strategy session now to advance your project.</p><p><a href="https://pixelscientia.com/services/advisory/">Computer Vision Advisory Services</a> – Monthly advisory services to help you strategically plan your CV/ML capabilities, reduce the trial-and-error of model development, and get to market faster.</p>]]>
      </content:encoded>
      <pubDate>Mon, 17 Oct 2022 06:00:00 -0400</pubDate>
      <author>Heather D. Couture</author>
      <enclosure url="https://2.gum.fm/op3.dev/e/pdcn.co/e/pdst.fm/e/dts.podtrac.com/redirect.mp3/media.transistor.fm/c4f174b8/8b57b2bb.mp3" length="24580701" type="audio/mpeg"/>
      <itunes:author>Heather D. Couture</itunes:author>
      <itunes:image href="https://img.transistor.fm/iH52o4eaoskH1BqCTNYslbaIRt2SMhpD3xidX3aV3Kw/rs:fill:0:0:1/w:1400/h:1400/q:60/mb:500000/aHR0cHM6Ly9pbWct/dXBsb2FkLXByb2R1/Y3Rpb24udHJhbnNp/c3Rvci5mbS9lcGlz/b2RlLzEwNTQ2NTUv/MTY2NTA4MzI3My1h/cnR3b3JrLmpwZw.jpg"/>
      <itunes:duration>1534</itunes:duration>
      <itunes:summary>Matt Alderdice, Head of Data Science at Sonrai Analytics, about precision medicine. Sonrai Analytics automates laborious data processes and speeds up new drug and healthcare developments.</itunes:summary>
      <itunes:subtitle>Matt Alderdice, Head of Data Science at Sonrai Analytics, about precision medicine. Sonrai Analytics automates laborious data processes and speeds up new drug and healthcare developments.</itunes:subtitle>
      <itunes:keywords>AI, startups, business, machine learning, computer vision, deep learning, impact, sustainability, healthcare, medical</itunes:keywords>
      <itunes:explicit>No</itunes:explicit>
    </item>
    <item>
      <title>Diagnosing Emergent Diseases with David Golan from Viz.ai</title>
      <itunes:episode>3</itunes:episode>
      <podcast:episode>3</podcast:episode>
      <itunes:title>Diagnosing Emergent Diseases with David Golan from Viz.ai</itunes:title>
      <itunes:episodeType>full</itunes:episodeType>
      <guid isPermaLink="false">3aa63520-191b-40ad-95a2-e4e9ee41a141</guid>
      <link>https://pixelscientia.com/podcast/diagnosing-emergent-diseases-with-david-golan-from-viz-ai/</link>
      <description>
        <![CDATA[<p>In this episode, I talk with David Golan, co-founder and CTO of Viz.ai, about diagnosis of acute and emergent diseases. Viz.ai increases the speed of diagnosis and care for a variety of conditions to improve the lives of patients.</p><p><br></p><p><strong>Highlights:</strong></p><ul><li>Increasing access to lifesaving treatments.</li><li>The importance of the full system, not just the machine learning component, in accelerating workflows.</li><li>Their clinical AI team includes med students, MDs, biomedical engineers, and neuropsychologists.</li><li>Bias can be created by a lower performance on a subset of the population in a way that is unknown to developers, users, and clinicians.</li><li>Careful monitoring of algorithms to identify subsets of data with poor performance.</li><li>Unbiased collection and stratification of data for FDA submission.</li><li>The importance of good annotation and monitoring infrastructure.</li><li>Relatively simple model architectures can take you a long way.</li></ul><p><br></p><p><strong>Links:</strong></p><p><a href="https://www.viz.ai/">Viz.ai’s website</a></p><p><a href="https://www.linkedin.com/in/david-golan-1bb9ba98/">David Golan on LinkedIn</a></p><p><strong>Resources for Computer Vision Teams:</strong></p><p><a href="https://www.linkedin.com/in/hdcouture/">LinkedIn</a> – Connect with Heather.</p><p><a href="https://pixelscientia.com/newsletter/">Computer Vision Insights Newsletter</a> – A biweekly newsletter to help bring the latest machine learning and computer vision research to applications in people and planetary health.</p><p><a href="http://pixelscientia.com/services/strategy-session/">Computer Vision Strategy Session</a> – Not sure how to advance your computer vision project? Get unstuck with a clear set of next steps. 
Schedule a 1 hour strategy session now to advance your project.</p><p><a href="https://pixelscientia.com/services/advisory/">Computer Vision Advisory Services</a> – Monthly advisory services to help you strategically plan your CV/ML capabilities, reduce the trial-and-error of model development, and get to market faster.</p>]]>
      </description>
      <content:encoded>
        <![CDATA[<p>In this episode, I talk with David Golan, co-founder and CTO of Viz.ai, about diagnosis of acute and emergent diseases. Viz.ai increases the speed of diagnosis and care for a variety of conditions to improve the lives of patients.</p><p><br></p><p><strong>Highlights:</strong></p><ul><li>Increasing access to lifesaving treatments.</li><li>The importance of the full system, not just the machine learning component, in accelerating workflows.</li><li>Their clinical AI team includes med students, MDs, biomedical engineers, and neuropsychologists.</li><li>Bias can be created by a lower performance on a subset of the population in a way that is unknown to developers, users, and clinicians.</li><li>Careful monitoring of algorithms to identify subsets of data with poor performance.</li><li>Unbiased collection and stratification of data for FDA submission.</li><li>The importance of good annotation and monitoring infrastructure.</li><li>Relatively simple model architectures can take you a long way.</li></ul><p><br></p><p><strong>Links:</strong></p><p><a href="https://www.viz.ai/">Viz.ai’s website</a></p><p><a href="https://www.linkedin.com/in/david-golan-1bb9ba98/">David Golan on LinkedIn</a></p><p><strong>Resources for Computer Vision Teams:</strong></p><p><a href="https://www.linkedin.com/in/hdcouture/">LinkedIn</a> – Connect with Heather.</p><p><a href="https://pixelscientia.com/newsletter/">Computer Vision Insights Newsletter</a> – A biweekly newsletter to help bring the latest machine learning and computer vision research to applications in people and planetary health.</p><p><a href="http://pixelscientia.com/services/strategy-session/">Computer Vision Strategy Session</a> – Not sure how to advance your computer vision project? Get unstuck with a clear set of next steps. 
Schedule a 1 hour strategy session now to advance your project.</p><p><a href="https://pixelscientia.com/services/advisory/">Computer Vision Advisory Services</a> – Monthly advisory services to help you strategically plan your CV/ML capabilities, reduce the trial-and-error of model development, and get to market faster.</p>]]>
      </content:encoded>
      <pubDate>Mon, 10 Oct 2022 06:02:00 -0400</pubDate>
      <author>Heather D. Couture</author>
      <enclosure url="https://2.gum.fm/op3.dev/e/pdcn.co/e/pdst.fm/e/dts.podtrac.com/redirect.mp3/media.transistor.fm/a95789c0/fb0dfbee.mp3" length="30808241" type="audio/mpeg"/>
      <itunes:author>Heather D. Couture</itunes:author>
      <itunes:image href="https://img.transistor.fm/NkiMEBa1ay6UHrx5tQ5ipxn4b4aITHagPimSZc9UDFI/rs:fill:0:0:1/w:1400/h:1400/q:60/mb:500000/aHR0cHM6Ly9pbWct/dXBsb2FkLXByb2R1/Y3Rpb24udHJhbnNp/c3Rvci5mbS9lcGlz/b2RlLzEwNTQ2NTAv/MTY2NTA3MTg5NC1h/cnR3b3JrLmpwZw.jpg"/>
      <itunes:duration>1923</itunes:duration>
      <itunes:summary>David Golan, co-founder and CTO of Viz.ai, about diagnosis of acute and emergent diseases. Viz.ai increases the speed of diagnosis and care for a variety of conditions to improve the lives of patients.</itunes:summary>
      <itunes:subtitle>David Golan, co-founder and CTO of Viz.ai, about diagnosis of acute and emergent diseases. Viz.ai increases the speed of diagnosis and care for a variety of conditions to improve the lives of patients.</itunes:subtitle>
      <itunes:keywords>AI, startups, business, machine learning, computer vision, deep learning, impact, sustainability, healthcare, medical</itunes:keywords>
      <itunes:explicit>No</itunes:explicit>
    </item>
    <item>
      <title>Environmental Risk Analysis with Steve Brumby from Impact Observatory</title>
      <itunes:episode>2</itunes:episode>
      <podcast:episode>2</podcast:episode>
      <itunes:title>Environmental Risk Analysis with Steve Brumby from Impact Observatory</itunes:title>
      <itunes:episodeType>full</itunes:episodeType>
      <guid isPermaLink="false">c82b028a-531d-47f5-ab08-10e74d91613b</guid>
      <link>https://pixelscientia.com/podcast/environmental-risk-analysis-with-steve-brumby-from-impact-observatory/</link>
      <description>
        <![CDATA[<p>In this episode, I talk with Steve Brumby, co-founder, CEO and CTO of Impact Observatory, about sustainability and environmental risk analysis. Impact Observatory uses satellite imagery and machine learning to empower decision-makers with planetary insights.</p><p><br></p><p><strong>Highlights:</strong></p><ul><li>Using machine learning to generate thematic maps to represent land cover and land use.</li><li>Geospatial data from the European Space Agency’s Copernicus program that is available on a variety of platforms.</li><li>The importance of identifying the relevant output for end users and others in the value chain.</li><li>How machine learning engineers sometimes discover things used by remote sensing scientists that are no longer necessary.</li><li>Keeping models simple.</li><li>Mitigating bias in models by using large and globally diverse datasets.</li><li>Get to know your customer and their pain points, then craft a machine learning solution that works for them – if you’re lucky, it’ll also work for others.</li><li>Finding the things you’re passionate about – both the technology and helping the customers in that space.</li></ul><p><br></p><p><strong>Links:</strong></p><ul><li><a href="https://www.impactobservatory.com">Impact Observatory's website</a></li><li><a href="https://www.linkedin.com/in/stevenbrumby/">Steve Brumby on LinkedIn</a></li><li>K. Karra, C. Kontgis, Z. Statman-Weil, J. C. Mazzariello, M. Mathis and S. P. Brumby, "Global land use / land cover with Sentinel 2 and deep learning," 2021 IEEE International Geoscience and Remote Sensing Symposium IGARSS, 2021, pp. 4704-4707, doi: <a href="https://ieeexplore.ieee.org/document/9553499">10.1109/IGARSS47720.2021.9553499</a></li><li>Brown, C.F., Brumby, S.P., Guzder-Williams, B. et al. Dynamic World, Near real-time global 10 m land use land cover mapping. Scientific Data 9, 251 (2022). 
<a href="https://doi.org/10.1038/s41597-022-01307-4">https://doi.org/10.1038/s41597-022-01307-4</a></li><li>Impact Observatory’s data<ul><li><a href="https://www.arcgis.com/home/item.html?id=d6642f8a4f6d4685a24ae2dc0c73d4ac">https://www.arcgis.com/home/item.html?id=d6642f8a4f6d4685a24ae2dc0c73d4ac</a></li><li><a href="https://planetarycomputer.microsoft.com/dataset/io-lulc-9-class">https://planetarycomputer.microsoft.com/dataset/io-lulc-9-class</a></li></ul></li><li>UN Biodiversity Lab: <a href="https://map.unbiodiversitylab.org/location/UNBL/australia?basemap=satellite&amp;coordinates=-25.2757386,118.7104437,3&amp;layers=10m-annual-land-use-land-cover-9-class-01_100,wcmc-terrestrial-carbon-v3_100,esri-2020-land-cover-10m-io_60">country dashboard for Australia</a></li></ul><p><strong>Resources for Computer Vision Teams:</strong><br><a href="https://www.linkedin.com/in/hdcouture/">LinkedIn</a> – Connect with Heather.<br><a href="https://pixelscientia.com/newsletter/">Computer Vision Insights Newsletter</a> – A biweekly newsletter to help bring the latest machine learning and computer vision research to applications in people and planetary health.<br><a href="http://pixelscientia.com/services/strategy-session/">Computer Vision Strategy Session</a> – Not sure how to advance your computer vision project? Get unstuck with a clear set of next steps. Schedule a 1 hour strategy session now to advance your project.<br><a href="https://pixelscientia.com/services/advisory/">Computer Vision Advisory Services</a> – Monthly advisory services to help you strategically plan your CV/ML capabilities, reduce the trial-and-error of model development, and get to market faster.</p>]]>
      </description>
      <content:encoded>
        <![CDATA[<p>In this episode, I talk with Steve Brumby, co-founder, CEO and CTO of Impact Observatory, about sustainability and environmental risk analysis. Impact Observatory uses satellite imagery and machine learning to empower decision-makers with planetary insights.</p><p><br></p><p><strong>Highlights:</strong></p><ul><li>Using machine learning to generate thematic maps to represent land cover and land use.</li><li>Geospatial data from the European Space Agency’s Copernicus program that is available on a variety of platforms.</li><li>The importance of identifying the relevant output for end users and others in the value chain.</li><li>How machine learning engineers sometimes discover things used by remote sensing scientists that are no longer necessary.</li><li>Keeping models simple.</li><li>Mitigating bias in models by using large and globally diverse datasets.</li><li>Get to know your customer and their pain points, then craft a machine learning solution that works for them – if you’re lucky, it’ll also work for others.</li><li>Finding the things you’re passionate about – both the technology and helping the customers in that space.</li></ul><p><br></p><p><strong>Links:</strong></p><ul><li><a href="https://www.impactobservatory.com">Impact Observatory's website</a></li><li><a href="https://www.linkedin.com/in/stevenbrumby/">Steve Brumby on LinkedIn</a></li><li>K. Karra, C. Kontgis, Z. Statman-Weil, J. C. Mazzariello, M. Mathis and S. P. Brumby, "Global land use / land cover with Sentinel 2 and deep learning," 2021 IEEE International Geoscience and Remote Sensing Symposium IGARSS, 2021, pp. 4704-4707, doi: <a href="https://ieeexplore.ieee.org/document/9553499">10.1109/IGARSS47720.2021.9553499</a></li><li>Brown, C.F., Brumby, S.P., Guzder-Williams, B. et al. Dynamic World, Near real-time global 10 m land use land cover mapping. Scientific Data 9, 251 (2022). 
<a href="https://doi.org/10.1038/s41597-022-01307-4">https://doi.org/10.1038/s41597-022-01307-4</a></li><li>Impact Observatory’s data<ul><li><a href="https://www.arcgis.com/home/item.html?id=d6642f8a4f6d4685a24ae2dc0c73d4ac">https://www.arcgis.com/home/item.html?id=d6642f8a4f6d4685a24ae2dc0c73d4ac</a></li><li><a href="https://planetarycomputer.microsoft.com/dataset/io-lulc-9-class">https://planetarycomputer.microsoft.com/dataset/io-lulc-9-class</a></li></ul></li><li>UN Biodiversity Lab: <a href="https://map.unbiodiversitylab.org/location/UNBL/australia?basemap=satellite&amp;coordinates=-25.2757386,118.7104437,3&amp;layers=10m-annual-land-use-land-cover-9-class-01_100,wcmc-terrestrial-carbon-v3_100,esri-2020-land-cover-10m-io_60">country dashboard for Australia</a></li></ul><p><strong>Resources for Computer Vision Teams:</strong><br><a href="https://www.linkedin.com/in/hdcouture/">LinkedIn</a> – Connect with Heather.<br><a href="https://pixelscientia.com/newsletter/">Computer Vision Insights Newsletter</a> – A biweekly newsletter to help bring the latest machine learning and computer vision research to applications in people and planetary health.<br><a href="http://pixelscientia.com/services/strategy-session/">Computer Vision Strategy Session</a> – Not sure how to advance your computer vision project? Get unstuck with a clear set of next steps. Schedule a 1 hour strategy session now to advance your project.<br><a href="https://pixelscientia.com/services/advisory/">Computer Vision Advisory Services</a> – Monthly advisory services to help you strategically plan your CV/ML capabilities, reduce the trial-and-error of model development, and get to market faster.</p>]]>
      </content:encoded>
      <pubDate>Mon, 10 Oct 2022 06:01:00 -0400</pubDate>
      <author>Heather D. Couture</author>
      <enclosure url="https://2.gum.fm/op3.dev/e/pdcn.co/e/pdst.fm/e/dts.podtrac.com/redirect.mp3/media.transistor.fm/f5fd44cb/ae694faa.mp3" length="35405816" type="audio/mpeg"/>
      <itunes:author>Heather D. Couture</itunes:author>
      <itunes:image href="https://img.transistor.fm/aTyocZaovDqCgdtNOOQ5QlcgSJAyP_Jey5e0slzJ8Qg/rs:fill:0:0:1/w:1400/h:1400/q:60/mb:500000/aHR0cHM6Ly9pbWct/dXBsb2FkLXByb2R1/Y3Rpb24udHJhbnNp/c3Rvci5mbS9lcGlz/b2RlLzEwNTQ2MDMv/MTY2NTA2OTk5Ny1h/cnR3b3JrLmpwZw.jpg"/>
      <itunes:duration>2211</itunes:duration>
      <itunes:summary>Steve Brumby, co-founder, CEO and CTO of Impact Observatory, talks about sustainability and environmental risk analysis. Impact Observatory uses satellite imagery and machine learning to empower decision-makers with planetary insights.</itunes:summary>
      <itunes:subtitle>Steve Brumby, co-founder, CEO and CTO of Impact Observatory, talks about sustainability and environmental risk analysis. Impact Observatory uses satellite imagery and machine learning to empower decision-makers with planetary insights.</itunes:subtitle>
      <itunes:keywords>AI, startups, business, machine learning, computer vision, deep learning, impact, sustainability, healthcare, medical</itunes:keywords>
      <itunes:explicit>No</itunes:explicit>
    </item>
    <item>
      <title>Diagnosis and Management of Epilepsy with Dean Freestone from Seer</title>
      <itunes:episode>1</itunes:episode>
      <podcast:episode>1</podcast:episode>
      <itunes:title>Diagnosis and Management of Epilepsy with Dean Freestone from Seer</itunes:title>
      <itunes:episodeType>full</itunes:episodeType>
      <guid isPermaLink="false">6a11fff0-ba58-4c11-bf73-5b76a4b1b7d4</guid>
      <link>https://pixelscientia.com/podcast/diagnosis-and-management-of-epilepsy-with-dean-freestone-from-seer/</link>
      <description>
        <![CDATA[<p>In this episode, I talk with Dean Freestone, co-founder and CEO of Seer, about epilepsy. Seer uses home monitoring to diagnose and manage neurological conditions, relieving bottlenecks in the healthcare system.</p><p><br></p><p><strong>Highlights:</strong></p><ul><li>Using machine learning to summarize data to reduce the labor intensive search for episodic events.</li><li>Handling imbalanced datasets. </li><li>Controlling the workflows to enable gathering and annotating huge datasets.</li><li>Working with technicians to speed up review of EEG data.</li><li>Using machine learning to capture features that doctors can’t describe.</li><li>Dealing with low inter-reviewer agreement from clinicians.</li><li>How bias can manifest in neurological data.</li><li>Do not underestimate the cost and amount of work to build a healthcare AI startup.</li></ul><p><br></p><p><strong>Links:</strong></p><p><a href="https://seermedical.com/">Seer’s website</a></p><p><a href="https://www.linkedin.com/company/seer-medical/">Seer on LinkedIn</a></p><p><a href="https://www.linkedin.com/in/deanfreestone/">Dean Freestone on LinkedIn</a></p><p><strong>Resources for Computer Vision Teams:</strong></p><p><a href="https://www.linkedin.com/in/hdcouture/">LinkedIn</a> – Connect with Heather.</p><p><a href="https://pixelscientia.com/newsletter/">Computer Vision Insights Newsletter</a> – A biweekly newsletter to help bring the latest machine learning and computer vision research to applications in people and planetary health.</p><p><a href="http://pixelscientia.com/services/strategy-session/">Computer Vision Strategy Session</a> – Not sure how to advance your computer vision project? Get unstuck with a clear set of next steps. 
Schedule a 1 hour strategy session now to advance your project.</p><p><a href="https://pixelscientia.com/services/advisory/">Computer Vision Advisory Services</a> – Monthly advisory services to help you strategically plan your CV/ML capabilities, reduce the trial-and-error of model development, and get to market faster.</p>]]>
      </description>
      <content:encoded>
        <![CDATA[<p>In this episode, I talk with Dean Freestone, co-founder and CEO of Seer, about epilepsy. Seer uses home monitoring to diagnose and manage neurological conditions, relieving bottlenecks in the healthcare system.</p><p><br></p><p><strong>Highlights:</strong></p><ul><li>Using machine learning to summarize data to reduce the labor intensive search for episodic events.</li><li>Handling imbalanced datasets. </li><li>Controlling the workflows to enable gathering and annotating huge datasets.</li><li>Working with technicians to speed up review of EEG data.</li><li>Using machine learning to capture features that doctors can’t describe.</li><li>Dealing with low inter-reviewer agreement from clinicians.</li><li>How bias can manifest in neurological data.</li><li>Do not underestimate the cost and amount of work to build a healthcare AI startup.</li></ul><p><br></p><p><strong>Links:</strong></p><p><a href="https://seermedical.com/">Seer’s website</a></p><p><a href="https://www.linkedin.com/company/seer-medical/">Seer on LinkedIn</a></p><p><a href="https://www.linkedin.com/in/deanfreestone/">Dean Freestone on LinkedIn</a></p><p><strong>Resources for Computer Vision Teams:</strong></p><p><a href="https://www.linkedin.com/in/hdcouture/">LinkedIn</a> – Connect with Heather.</p><p><a href="https://pixelscientia.com/newsletter/">Computer Vision Insights Newsletter</a> – A biweekly newsletter to help bring the latest machine learning and computer vision research to applications in people and planetary health.</p><p><a href="http://pixelscientia.com/services/strategy-session/">Computer Vision Strategy Session</a> – Not sure how to advance your computer vision project? Get unstuck with a clear set of next steps. 
Schedule a 1 hour strategy session now to advance your project.</p><p><a href="https://pixelscientia.com/services/advisory/">Computer Vision Advisory Services</a> – Monthly advisory services to help you strategically plan your CV/ML capabilities, reduce the trial-and-error of model development, and get to market faster.</p>]]>
      </content:encoded>
      <pubDate>Mon, 10 Oct 2022 06:00:00 -0400</pubDate>
      <author>Heather D. Couture</author>
      <enclosure url="https://2.gum.fm/op3.dev/e/pdcn.co/e/pdst.fm/e/dts.podtrac.com/redirect.mp3/media.transistor.fm/8db91146/22e16c4c.mp3" length="25120670" type="audio/mpeg"/>
      <itunes:author>Heather D. Couture</itunes:author>
      <itunes:image href="https://img.transistor.fm/dAK_2mf1d-z3fibhkODIyNBLqwzJwH03OcFFTr5qnFk/rs:fill:0:0:1/w:1400/h:1400/q:60/mb:500000/aHR0cHM6Ly9pbWct/dXBsb2FkLXByb2R1/Y3Rpb24udHJhbnNp/c3Rvci5mbS9lcGlz/b2RlLzEwNDk3OTcv/MTY2NDkzOTI2Ni1h/cnR3b3JrLmpwZw.jpg"/>
      <itunes:duration>1568</itunes:duration>
      <itunes:summary>Dean Freestone, co-founder and CEO of Seer, talks about epilepsy. Seer uses home monitoring to diagnose and manage neurological conditions, relieving bottlenecks in the healthcare system.</itunes:summary>
      <itunes:subtitle>Dean Freestone, co-founder and CEO of Seer, talks about epilepsy. Seer uses home monitoring to diagnose and manage neurological conditions, relieving bottlenecks in the healthcare system.</itunes:subtitle>
      <itunes:keywords>AI, startups, business, machine learning, computer vision, deep learning, impact, sustainability, healthcare, medical</itunes:keywords>
      <itunes:explicit>No</itunes:explicit>
      <podcast:transcript url="https://share.transistor.fm/s/8db91146/transcript.vtt" type="text/vtt" rel="captions"/>
    </item>
    <item>
      <title>Welcome to Impact AI</title>
      <itunes:title>Welcome to Impact AI</itunes:title>
      <itunes:episodeType>trailer</itunes:episodeType>
      <guid isPermaLink="false">55e94fc8-9848-4172-a886-aabdb3eb1f71</guid>
      <link>https://pixelscientia.com/podcast/welcome-to-impact-ai/</link>
      <description>
        <![CDATA[<p>Welcome to Impact AI, the podcast for startups who want to create a better future through the use of machine learning.</p><p><br></p><p>I'm your host, Heather Couture.</p><p><br></p><p>In this podcast, you’ll learn how to build a mission-driven machine learning company. </p><p><br></p><p>I’ll be interviewing innovators and entrepreneurs from a variety of industries: healthcare, drug development, environmental, agriculture, and many more.</p><p><br></p><p>Each is striving to solve a problem that they are passionate about. They will talk about the role machine learning plays in their technology and the impact of their product.</p><p><br></p><p>They will also help me uncover machine learning challenges like data annotation, generalizability, explainability, bias, and collaboration across disciplines – and best practices for tackling them in a startup environment.</p><p><br></p><p>Now, who am I?</p><p><br></p><p>I’m a consultant with almost 2 decades of experience in computer vision and machine learning for a variety of applications. From manufacturing to planetary science to commercial media to cancer research.</p><p><br></p><p>I completed a Masters at Carnegie Mellon University and a PhD in Computer Science at the University of North Carolina. As a researcher, I published in top-tier computer vision and medical imaging venues. Now I write regularly on LinkedIn, for my newsletter Pathology ML Insights, and for a variety of trade publications.</p><p><br></p><p>I offer consulting services through my company Pixel Scientia Labs to help startups get to market faster by building more generalizable computer vision models. I make use of the latest machine learning research to amplify their results and support their in-house team for the long term. 
My mission is to fight cancer and climate change with AI – and I do that by strengthening the machine learning component of my clients’ most impactful projects.</p><p><br></p><p>My hope for this podcast is to share machine learning best practices more widely so that many others can benefit as they work towards solving important problems.</p><p><br></p><p>Thanks for listening.</p><p><br></p><p>Please hit subscribe to be notified about new episodes.</p>]]>
      </description>
      <content:encoded>
        <![CDATA[<p>Welcome to Impact AI, the podcast for startups who want to create a better future through the use of machine learning.</p><p><br></p><p>I'm your host, Heather Couture.</p><p><br></p><p>In this podcast, you’ll learn how to build a mission-driven machine learning company. </p><p><br></p><p>I’ll be interviewing innovators and entrepreneurs from a variety of industries: healthcare, drug development, environmental, agriculture, and many more.</p><p><br></p><p>Each is striving to solve a problem that they are passionate about. They will talk about the role machine learning plays in their technology and the impact of their product.</p><p><br></p><p>They will also help me uncover machine learning challenges like data annotation, generalizability, explainability, bias, and collaboration across disciplines – and best practices for tackling them in a startup environment.</p><p><br></p><p>Now, who am I?</p><p><br></p><p>I’m a consultant with almost 2 decades of experience in computer vision and machine learning for a variety of applications. From manufacturing to planetary science to commercial media to cancer research.</p><p><br></p><p>I completed a Masters at Carnegie Mellon University and a PhD in Computer Science at the University of North Carolina. As a researcher, I published in top-tier computer vision and medical imaging venues. Now I write regularly on LinkedIn, for my newsletter Pathology ML Insights, and for a variety of trade publications.</p><p><br></p><p>I offer consulting services through my company Pixel Scientia Labs to help startups get to market faster by building more generalizable computer vision models. I make use of the latest machine learning research to amplify their results and support their in-house team for the long term. 
My mission is to fight cancer and climate change with AI – and I do that by strengthening the machine learning component of my clients’ most impactful projects.</p><p><br></p><p>My hope for this podcast is to share machine learning best practices more widely so that many others can benefit as they work towards solving important problems.</p><p><br></p><p>Thanks for listening.</p><p><br></p><p>Please hit subscribe to be notified about new episodes.</p>]]>
      </content:encoded>
      <pubDate>Tue, 04 Oct 2022 22:42:21 -0400</pubDate>
      <author>Heather D. Couture</author>
      <enclosure url="https://2.gum.fm/op3.dev/e/pdcn.co/e/pdst.fm/e/dts.podtrac.com/redirect.mp3/media.transistor.fm/1e71a21e/09c8c2c8.mp3" length="1999494" type="audio/mpeg"/>
      <itunes:author>Heather D. Couture</itunes:author>
      <itunes:duration>123</itunes:duration>
      <itunes:summary>Welcome to Impact AI, the podcast for startups who want to create a better future through the use of machine learning.</itunes:summary>
      <itunes:subtitle>Welcome to Impact AI, the podcast for startups who want to create a better future through the use of machine learning.</itunes:subtitle>
      <itunes:keywords>AI, startups, business, machine learning, computer vision, deep learning, impact, sustainability, healthcare, medical</itunes:keywords>
      <itunes:explicit>No</itunes:explicit>
    </item>
  </channel>
</rss>
