window.__IS_SSR__=true
window.__INITIAL_STATE__={
"attachmentsReducer": {
"audio_0": {
"type": "attachments",
"id": "audio_0",
"imgSizes": {
"kqedFullSize": {
"file": "https://ww2.kqed.org/news/wp-content/themes/KQED-unified/img/audio_bgs/background0.jpg"
}
}
},
"audio_1": {
"type": "attachments",
"id": "audio_1",
"imgSizes": {
"kqedFullSize": {
"file": "https://ww2.kqed.org/news/wp-content/themes/KQED-unified/img/audio_bgs/background1.jpg"
}
}
},
"audio_2": {
"type": "attachments",
"id": "audio_2",
"imgSizes": {
"kqedFullSize": {
"file": "https://ww2.kqed.org/news/wp-content/themes/KQED-unified/img/audio_bgs/background2.jpg"
}
}
},
"audio_3": {
"type": "attachments",
"id": "audio_3",
"imgSizes": {
"kqedFullSize": {
"file": "https://ww2.kqed.org/news/wp-content/themes/KQED-unified/img/audio_bgs/background3.jpg"
}
}
},
"audio_4": {
"type": "attachments",
"id": "audio_4",
"imgSizes": {
"kqedFullSize": {
"file": "https://ww2.kqed.org/news/wp-content/themes/KQED-unified/img/audio_bgs/background4.jpg"
}
}
},
"placeholder": {
"type": "attachments",
"id": "placeholder",
"imgSizes": {
"thumbnail": {
"file": "https://cdn.kqed.org/wp-content/uploads/2024/12/KQED-Default-Image-816638274-2000x1333-1-160x107.jpg",
"width": 160,
"height": 107,
"mimeType": "image/jpeg"
},
"medium": {
"file": "https://cdn.kqed.org/wp-content/uploads/2024/12/KQED-Default-Image-816638274-2000x1333-1-800x533.jpg",
"width": 800,
"height": 533,
"mimeType": "image/jpeg"
},
"medium_large": {
"file": "https://cdn.kqed.org/wp-content/uploads/2024/12/KQED-Default-Image-816638274-2000x1333-1-768x512.jpg",
"width": 768,
"height": 512,
"mimeType": "image/jpeg"
},
"large": {
"file": "https://cdn.kqed.org/wp-content/uploads/2024/12/KQED-Default-Image-816638274-2000x1333-1-1020x680.jpg",
"width": 1020,
"height": 680,
"mimeType": "image/jpeg"
},
"1536x1536": {
"file": "https://cdn.kqed.org/wp-content/uploads/2024/12/KQED-Default-Image-816638274-2000x1333-1-1536x1024.jpg",
"width": 1536,
"height": 1024,
"mimeType": "image/jpeg"
},
"fd-lrg": {
"file": "https://cdn.kqed.org/wp-content/uploads/2024/12/KQED-Default-Image-816638274-2000x1333-1-1536x1024.jpg",
"width": 1536,
"height": 1024,
"mimeType": "image/jpeg"
},
"fd-med": {
"file": "https://cdn.kqed.org/wp-content/uploads/2024/12/KQED-Default-Image-816638274-2000x1333-1-1020x680.jpg",
"width": 1020,
"height": 680,
"mimeType": "image/jpeg"
},
"fd-sm": {
"file": "https://cdn.kqed.org/wp-content/uploads/2024/12/KQED-Default-Image-816638274-2000x1333-1-800x533.jpg",
"width": 800,
"height": 533,
"mimeType": "image/jpeg"
},
"post-thumbnail": {
"file": "https://cdn.kqed.org/wp-content/uploads/2024/12/KQED-Default-Image-816638274-2000x1333-1-672x372.jpg",
"width": 672,
"height": 372,
"mimeType": "image/jpeg"
},
"twentyfourteen-full-width": {
"file": "https://cdn.kqed.org/wp-content/uploads/2024/12/KQED-Default-Image-816638274-2000x1333-1-1038x576.jpg",
"width": 1038,
"height": 576,
"mimeType": "image/jpeg"
},
"xxsmall": {
"file": "https://cdn.kqed.org/wp-content/uploads/2024/12/KQED-Default-Image-816638274-2000x1333-1-160x107.jpg",
"width": 160,
"height": 107,
"mimeType": "image/jpeg"
},
"xsmall": {
"file": "https://cdn.kqed.org/wp-content/uploads/2024/12/KQED-Default-Image-816638274-2000x1333-1-672x372.jpg",
"width": 672,
"height": 372,
"mimeType": "image/jpeg"
},
"small": {
"file": "https://cdn.kqed.org/wp-content/uploads/2024/12/KQED-Default-Image-816638274-2000x1333-1-672x372.jpg",
"width": 672,
"height": 372,
"mimeType": "image/jpeg"
},
"xlarge": {
"file": "https://cdn.kqed.org/wp-content/uploads/2024/12/KQED-Default-Image-816638274-2000x1333-1-1020x680.jpg",
"width": 1020,
"height": 680,
"mimeType": "image/jpeg"
},
"full-width": {
"file": "https://cdn.kqed.org/wp-content/uploads/2024/12/KQED-Default-Image-816638274-2000x1333-1-1920x1280.jpg",
"width": 1920,
"height": 1280,
"mimeType": "image/jpeg"
},
"guest-author-32": {
"file": "https://cdn.kqed.org/wp-content/uploads/2025/01/KQED-Default-Image-816638274-1333x1333-1-160x160.jpg",
"width": 32,
"height": 32,
"mimeType": "image/jpeg"
},
"guest-author-50": {
"file": "https://cdn.kqed.org/wp-content/uploads/2025/01/KQED-Default-Image-816638274-1333x1333-1-160x160.jpg",
"width": 50,
"height": 50,
"mimeType": "image/jpeg"
},
"guest-author-64": {
"file": "https://cdn.kqed.org/wp-content/uploads/2025/01/KQED-Default-Image-816638274-1333x1333-1-160x160.jpg",
"width": 64,
"height": 64,
"mimeType": "image/jpeg"
},
"guest-author-96": {
"file": "https://cdn.kqed.org/wp-content/uploads/2025/01/KQED-Default-Image-816638274-1333x1333-1-160x160.jpg",
"width": 96,
"height": 96,
"mimeType": "image/jpeg"
},
"guest-author-128": {
"file": "https://cdn.kqed.org/wp-content/uploads/2025/01/KQED-Default-Image-816638274-1333x1333-1-160x160.jpg",
"width": 128,
"height": 128,
"mimeType": "image/jpeg"
},
"detail": {
"file": "https://cdn.kqed.org/wp-content/uploads/2025/01/KQED-Default-Image-816638274-1333x1333-1-160x160.jpg",
"width": 160,
"height": 160,
"mimeType": "image/jpeg"
},
"kqedFullSize": {
"file": "https://cdn.kqed.org/wp-content/uploads/2024/12/KQED-Default-Image-816638274-2000x1333-1.jpg",
"width": 2000,
"height": 1333
}
}
},
"news_12082483": {
"type": "attachments",
"id": "news_12082483",
"meta": {
"index": "attachments_1716263798",
"site": "news",
"id": "12082483",
"found": true
},
"title": "AI Therapy_web img",
"publishDate": 1778027303,
"status": "inherit",
"parent": 12082478,
"modified": 1778027360,
"caption": "Can AI therapy apps like Rosebud, Therapist GPT and Woebot bridge the gap in mental health care — offering comfort and support in an era of stress, loneliness and anxiety? ",
"credit": "Anna Vignet/KQED",
"altTag": "Illustration of a dark-skinned person sitting on the ground with a concerned expression, arms wrapped around their knees. A smartphone lies beside them, emitting ghost-like, chaotic speech bubbles that contain sketches of robot faces. The background features swirling brushstrokes in shades of blue, yellow, and green. In the bottom left corner, the words \"CLOSE ALL TABS\" appear in blocky, pixel-style font.",
"description": null,
"imgSizes": {
"thumbnail": {
"file": "https://cdn.kqed.org/wp-content/uploads/sites/10/2026/05/AI-Therapy_web-img-160x90.png",
"width": 160,
"height": 90,
"mimeType": "image/png"
},
"1536x1536": {
"file": "https://cdn.kqed.org/wp-content/uploads/sites/10/2026/05/AI-Therapy_web-img-1536x864.png",
"width": 1536,
"height": 864,
"mimeType": "image/png"
},
"post-thumbnail": {
"file": "https://cdn.kqed.org/wp-content/uploads/sites/10/2026/05/AI-Therapy_web-img-672x372.png",
"width": 672,
"height": 372,
"mimeType": "image/png"
},
"twentyfourteen-full-width": {
"file": "https://cdn.kqed.org/wp-content/uploads/sites/10/2026/05/AI-Therapy_web-img-1038x576.png",
"width": 1038,
"height": 576,
"mimeType": "image/png"
},
"npr-cds-wide": {
"file": "https://cdn.kqed.org/wp-content/uploads/sites/10/2026/05/AI-Therapy_web-img-1200x675.png",
"width": 1200,
"height": 675,
"mimeType": "image/png"
},
"npr-cds-square": {
"file": "https://cdn.kqed.org/wp-content/uploads/sites/10/2026/05/AI-Therapy_web-img-600x600.png",
"width": 600,
"height": 600,
"mimeType": "image/png"
},
"kqedFullSize": {
"file": "https://cdn.kqed.org/wp-content/uploads/sites/10/2026/05/AI-Therapy_web-img.png",
"width": 1920,
"height": 1080
}
},
"fetchFailed": false,
"isLoading": false
},
"news_12081606": {
"type": "attachments",
"id": "news_12081606",
"meta": {
"index": "attachments_1716263798",
"site": "news",
"id": "12081606",
"found": true
},
"title": "Musk OpenAI Trial",
"publishDate": 1777397997,
"status": "inherit",
"parent": 12081603,
"modified": 1777398261,
"caption": "Elon Musk arrives at the U.S. District Court in Oakland, California, on Tuesday, April 28, 2026. ",
"credit": "Godofredo A. Vásquez/AP Photo",
"altTag": null,
"description": null,
"imgSizes": {
"medium": {
"file": "https://cdn.kqed.org/wp-content/uploads/sites/10/2026/04/AP26118555622828-2000x1333.jpg",
"width": 2000,
"height": 1333,
"mimeType": "image/jpeg"
},
"large": {
"file": "https://cdn.kqed.org/wp-content/uploads/sites/10/2026/04/AP26118555622828-2000x1333.jpg",
"width": 2000,
"height": 1333,
"mimeType": "image/jpeg"
},
"thumbnail": {
"file": "https://cdn.kqed.org/wp-content/uploads/sites/10/2026/04/AP26118555622828-160x107.jpg",
"width": 160,
"height": 107,
"mimeType": "image/jpeg"
},
"1536x1536": {
"file": "https://cdn.kqed.org/wp-content/uploads/sites/10/2026/04/AP26118555622828-1536x1024.jpg",
"width": 1536,
"height": 1024,
"mimeType": "image/jpeg"
},
"2048x2048": {
"file": "https://cdn.kqed.org/wp-content/uploads/sites/10/2026/04/AP26118555622828-2048x1365.jpg",
"width": 2048,
"height": 1365,
"mimeType": "image/jpeg"
},
"post-thumbnail": {
"file": "https://cdn.kqed.org/wp-content/uploads/sites/10/2026/04/AP26118555622828-672x372.jpg",
"width": 672,
"height": 372,
"mimeType": "image/jpeg"
},
"twentyfourteen-full-width": {
"file": "https://cdn.kqed.org/wp-content/uploads/sites/10/2026/04/AP26118555622828-1038x576.jpg",
"width": 1038,
"height": 576,
"mimeType": "image/jpeg"
},
"full-width": {
"file": "https://cdn.kqed.org/wp-content/uploads/sites/10/2026/04/AP26118555622828-2000x1333.jpg",
"width": 2000,
"height": 1333,
"mimeType": "image/jpeg"
},
"npr-cds-wide": {
"file": "https://cdn.kqed.org/wp-content/uploads/sites/10/2026/04/AP26118555622828-1200x675.jpg",
"width": 1200,
"height": 675,
"mimeType": "image/jpeg"
},
"npr-cds-square": {
"file": "https://cdn.kqed.org/wp-content/uploads/sites/10/2026/04/AP26118555622828-600x600.jpg",
"width": 600,
"height": 600,
"mimeType": "image/jpeg"
},
"kqedFullSize": {
"file": "https://cdn.kqed.org/wp-content/uploads/sites/10/2026/04/AP26118555622828-scaled-e1777398211110.jpg",
"width": 2000,
"height": 1334
}
},
"fetchFailed": false,
"isLoading": false
},
"news_12081681": {
"type": "attachments",
"id": "news_12081681",
"meta": {
"index": "attachments_1716263798",
"site": "news",
"id": "12081681",
"found": true
},
"title": "260428-MUSK-ALTMAN-VB-03-KQED-1",
"publishDate": 1777416108,
"status": "inherit",
"parent": 12081603,
"modified": 1777508469,
"caption": "Representing Microsoft, Russell Coan (left) speaks as Elon Musk watches in the trial in which Elon Musk claims that Sam Altman and OpenAI abandoned their founding promise to develop AI for the benefit of humanity, rather than solely for profit, in Oakland on April 28, 2026.",
"credit": "Vicki Behringer for KQED",
"altTag": null,
"description": null,
"imgSizes": {
"thumbnail": {
"file": "https://cdn.kqed.org/wp-content/uploads/sites/10/2026/04/260428-MUSK-ALTMAN-VB-03-KQED-1-160x90.jpg",
"width": 160,
"height": 90,
"mimeType": "image/jpeg"
},
"1536x1536": {
"file": "https://cdn.kqed.org/wp-content/uploads/sites/10/2026/04/260428-MUSK-ALTMAN-VB-03-KQED-1-1536x864.jpg",
"width": 1536,
"height": 864,
"mimeType": "image/jpeg"
},
"post-thumbnail": {
"file": "https://cdn.kqed.org/wp-content/uploads/sites/10/2026/04/260428-MUSK-ALTMAN-VB-03-KQED-1-672x372.jpg",
"width": 672,
"height": 372,
"mimeType": "image/jpeg"
},
"twentyfourteen-full-width": {
"file": "https://cdn.kqed.org/wp-content/uploads/sites/10/2026/04/260428-MUSK-ALTMAN-VB-03-KQED-1-1038x576.jpg",
"width": 1038,
"height": 576,
"mimeType": "image/jpeg"
},
"npr-cds-wide": {
"file": "https://cdn.kqed.org/wp-content/uploads/sites/10/2026/04/260428-MUSK-ALTMAN-VB-03-KQED-1-1200x675.jpg",
"width": 1200,
"height": 675,
"mimeType": "image/jpeg"
},
"npr-cds-square": {
"file": "https://cdn.kqed.org/wp-content/uploads/sites/10/2026/04/260428-MUSK-ALTMAN-VB-03-KQED-1-600x600.jpg",
"width": 600,
"height": 600,
"mimeType": "image/jpeg"
},
"kqedFullSize": {
"file": "https://cdn.kqed.org/wp-content/uploads/sites/10/2026/04/260428-MUSK-ALTMAN-VB-03-KQED-1.jpg",
"width": 2000,
"height": 1125
}
},
"fetchFailed": false,
"isLoading": false
},
"news_12081722": {
"type": "attachments",
"id": "news_12081722",
"meta": {
"index": "attachments_1716263798",
"site": "news",
"id": "12081722",
"found": true
},
"title": "Stalkerware_webimg",
"publishDate": 1777438301,
"status": "inherit",
"parent": 12081721,
"modified": 1777438340,
"caption": "Illustration featuring a pixelated eye and a silhouetted hand holding a smartphone.",
"credit": "Illustration by Softulka/Getty Images",
"altTag": "Textured images of an eye in black and white and a hand holding a black phone. A text bubble outlined in blue is popping out. The images are on top of a cream colored background with squiggly and round colorful geometric shapes around them. Each shape is made up of tiny dots. Pixelated “Close All Tabs” text appears in the upper right corner.",
"description": null,
"imgSizes": {
"thumbnail": {
"file": "https://cdn.kqed.org/wp-content/uploads/sites/10/2026/04/Stalkerware_webimg-160x90.png",
"width": 160,
"height": 90,
"mimeType": "image/png"
},
"1536x1536": {
"file": "https://cdn.kqed.org/wp-content/uploads/sites/10/2026/04/Stalkerware_webimg-1536x864.png",
"width": 1536,
"height": 864,
"mimeType": "image/png"
},
"post-thumbnail": {
"file": "https://cdn.kqed.org/wp-content/uploads/sites/10/2026/04/Stalkerware_webimg-672x372.png",
"width": 672,
"height": 372,
"mimeType": "image/png"
},
"twentyfourteen-full-width": {
"file": "https://cdn.kqed.org/wp-content/uploads/sites/10/2026/04/Stalkerware_webimg-1038x576.png",
"width": 1038,
"height": 576,
"mimeType": "image/png"
},
"npr-cds-wide": {
"file": "https://cdn.kqed.org/wp-content/uploads/sites/10/2026/04/Stalkerware_webimg-1200x675.png",
"width": 1200,
"height": 675,
"mimeType": "image/png"
},
"npr-cds-square": {
"file": "https://cdn.kqed.org/wp-content/uploads/sites/10/2026/04/Stalkerware_webimg-600x600.png",
"width": 600,
"height": 600,
"mimeType": "image/png"
},
"kqedFullSize": {
"file": "https://cdn.kqed.org/wp-content/uploads/sites/10/2026/04/Stalkerware_webimg.png",
"width": 1920,
"height": 1080
}
},
"fetchFailed": false,
"isLoading": false
},
"news_12081639": {
"type": "attachments",
"id": "news_12081639",
"meta": {
"index": "attachments_1716263798",
"site": "news",
"id": "12081639",
"found": true
},
"title": "260428-MUSK ALTMAN-VB-02-KQED",
"publishDate": 1777410140,
"status": "inherit",
"parent": 12081603,
"modified": 1777422271,
"caption": "Elon Musk (left) takes the stand in the trial in which Elon Musk claims that Sam Altman and OpenAI abandoned their founding promise to develop AI for the benefit of humanity, rather than solely for profit, in Oakland on April 28, 2026.",
"credit": "Vicki Behringer for KQED",
"altTag": null,
"description": null,
"imgSizes": {
"thumbnail": {
"file": "https://cdn.kqed.org/wp-content/uploads/sites/10/2026/04/260428-MUSK-ALTMAN-VB-02-KQED-160x90.jpg",
"width": 160,
"height": 90,
"mimeType": "image/jpeg"
},
"1536x1536": {
"file": "https://cdn.kqed.org/wp-content/uploads/sites/10/2026/04/260428-MUSK-ALTMAN-VB-02-KQED-1536x864.jpg",
"width": 1536,
"height": 864,
"mimeType": "image/jpeg"
},
"post-thumbnail": {
"file": "https://cdn.kqed.org/wp-content/uploads/sites/10/2026/04/260428-MUSK-ALTMAN-VB-02-KQED-672x372.jpg",
"width": 672,
"height": 372,
"mimeType": "image/jpeg"
},
"twentyfourteen-full-width": {
"file": "https://cdn.kqed.org/wp-content/uploads/sites/10/2026/04/260428-MUSK-ALTMAN-VB-02-KQED-1038x576.jpg",
"width": 1038,
"height": 576,
"mimeType": "image/jpeg"
},
"npr-cds-wide": {
"file": "https://cdn.kqed.org/wp-content/uploads/sites/10/2026/04/260428-MUSK-ALTMAN-VB-02-KQED-1200x675.jpg",
"width": 1200,
"height": 675,
"mimeType": "image/jpeg"
},
"npr-cds-square": {
"file": "https://cdn.kqed.org/wp-content/uploads/sites/10/2026/04/260428-MUSK-ALTMAN-VB-02-KQED-600x600.jpg",
"width": 600,
"height": 600,
"mimeType": "image/jpeg"
},
"kqedFullSize": {
"file": "https://cdn.kqed.org/wp-content/uploads/sites/10/2026/04/260428-MUSK-ALTMAN-VB-02-KQED.jpg",
"width": 2000,
"height": 1125
}
},
"fetchFailed": false,
"isLoading": false
},
"news_12080929": {
"type": "attachments",
"id": "news_12080929",
"meta": {
"index": "attachments_1716263798",
"site": "news",
"id": "12080929",
"found": true
},
"title": "260422-ALTMANMUSK-MD-01-KQED",
"publishDate": 1776885164,
"status": "inherit",
"parent": 0,
"modified": 1776885551,
"caption": "Once allies in what they called a mission to develop AI safely for humanity, Elon Musk and Sam Altman will let a federal judge and jury decide what that promise was worth. The trial is slated to begin April 27, 2026.",
"credit": "Left: Chip Somodevilla/Getty Images; Right: Fabrice Coffrini/AFP via Getty Images",
"altTag": null,
"description": null,
"imgSizes": {
"thumbnail": {
"file": "https://cdn.kqed.org/wp-content/uploads/sites/10/2026/04/260422-ALTMANMUSK-MD-01-KQED-160x107.jpg",
"width": 160,
"height": 107,
"mimeType": "image/jpeg"
},
"1536x1536": {
"file": "https://cdn.kqed.org/wp-content/uploads/sites/10/2026/04/260422-ALTMANMUSK-MD-01-KQED-1536x1025.jpg",
"width": 1536,
"height": 1025,
"mimeType": "image/jpeg"
},
"post-thumbnail": {
"file": "https://cdn.kqed.org/wp-content/uploads/sites/10/2026/04/260422-ALTMANMUSK-MD-01-KQED-672x372.jpg",
"width": 672,
"height": 372,
"mimeType": "image/jpeg"
},
"twentyfourteen-full-width": {
"file": "https://cdn.kqed.org/wp-content/uploads/sites/10/2026/04/260422-ALTMANMUSK-MD-01-KQED-1038x576.jpg",
"width": 1038,
"height": 576,
"mimeType": "image/jpeg"
},
"npr-cds-wide": {
"file": "https://cdn.kqed.org/wp-content/uploads/sites/10/2026/04/260422-ALTMANMUSK-MD-01-KQED-1200x675.jpg",
"width": 1200,
"height": 675,
"mimeType": "image/jpeg"
},
"npr-cds-square": {
"file": "https://cdn.kqed.org/wp-content/uploads/sites/10/2026/04/260422-ALTMANMUSK-MD-01-KQED-600x600.jpg",
"width": 600,
"height": 600,
"mimeType": "image/jpeg"
},
"kqedFullSize": {
"file": "https://cdn.kqed.org/wp-content/uploads/sites/10/2026/04/260422-ALTMANMUSK-MD-01-KQED.jpg",
"width": 2000,
"height": 1334
}
},
"fetchFailed": false,
"isLoading": false
},
"news_12081402": {
"type": "attachments",
"id": "news_12081402",
"meta": {
"index": "attachments_1716263798",
"site": "news",
"id": "12081402",
"found": true
},
"title": "UCBNoPhones4",
"publishDate": 1777073735,
"status": "inherit",
"parent": 12081336,
"modified": 1777074238,
"caption": "Students at UC Berkeley set up handwritten signs and played a variety of games during a phone-free party on campus on Friday, April 24, 2026.",
"credit": "Eliza Peppel/KQED",
"altTag": null,
"description": null,
"imgSizes": {
"thumbnail": {
"file": "https://cdn.kqed.org/wp-content/uploads/sites/10/2026/04/UCBNoPhones4-160x120.jpg",
"width": 160,
"height": 120,
"mimeType": "image/jpeg"
},
"1536x1536": {
"file": "https://cdn.kqed.org/wp-content/uploads/sites/10/2026/04/UCBNoPhones4-1536x1152.jpg",
"width": 1536,
"height": 1152,
"mimeType": "image/jpeg"
},
"post-thumbnail": {
"file": "https://cdn.kqed.org/wp-content/uploads/sites/10/2026/04/UCBNoPhones4-672x372.jpg",
"width": 672,
"height": 372,
"mimeType": "image/jpeg"
},
"twentyfourteen-full-width": {
"file": "https://cdn.kqed.org/wp-content/uploads/sites/10/2026/04/UCBNoPhones4-1038x576.jpg",
"width": 1038,
"height": 576,
"mimeType": "image/jpeg"
},
"npr-cds-wide": {
"file": "https://cdn.kqed.org/wp-content/uploads/sites/10/2026/04/UCBNoPhones4-1200x675.jpg",
"width": 1200,
"height": 675,
"mimeType": "image/jpeg"
},
"npr-cds-square": {
"file": "https://cdn.kqed.org/wp-content/uploads/sites/10/2026/04/UCBNoPhones4-600x600.jpg",
"width": 600,
"height": 600,
"mimeType": "image/jpeg"
},
"kqedFullSize": {
"file": "https://cdn.kqed.org/wp-content/uploads/sites/10/2026/04/UCBNoPhones4.jpg",
"width": 2000,
"height": 1500
}
},
"fetchFailed": false,
"isLoading": false
},
"news_12081306": {
"type": "attachments",
"id": "news_12081306",
"meta": {
"index": "attachments_1716263798",
"site": "news",
"id": "12081306",
"found": true
},
"title": "Photo Illustration - Anthropic Launches Project Glasswing",
"publishDate": 1777053839,
"status": "inherit",
"parent": 12081279,
"modified": 1777053863,
"caption": "A smartphone displays the Anthropic logo with the Project Glasswing webpage in the background, in Creteil, France, on April 8, 2026. Anthropic announces the launch of Project Glasswing, a cybersecurity initiative based on the Claude Mythos model to detect and remediate vulnerabilities in critical open-source software. (Photo by Samuel Boivin/NurPhoto via Getty Images)",
"credit": "Samuel Boivin/NurPhoto via Getty Images",
"altTag": null,
"description": null,
"imgSizes": {
"thumbnail": {
"file": "https://cdn.kqed.org/wp-content/uploads/sites/10/2026/04/AnthropicGetty-160x106.jpg",
"width": 160,
"height": 106,
"mimeType": "image/jpeg"
},
"1536x1536": {
"file": "https://cdn.kqed.org/wp-content/uploads/sites/10/2026/04/AnthropicGetty-1536x1021.jpg",
"width": 1536,
"height": 1021,
"mimeType": "image/jpeg"
},
"post-thumbnail": {
"file": "https://cdn.kqed.org/wp-content/uploads/sites/10/2026/04/AnthropicGetty-672x372.jpg",
"width": 672,
"height": 372,
"mimeType": "image/jpeg"
},
"twentyfourteen-full-width": {
"file": "https://cdn.kqed.org/wp-content/uploads/sites/10/2026/04/AnthropicGetty-1038x576.jpg",
"width": 1038,
"height": 576,
"mimeType": "image/jpeg"
},
"npr-cds-wide": {
"file": "https://cdn.kqed.org/wp-content/uploads/sites/10/2026/04/AnthropicGetty-1200x675.jpg",
"width": 1200,
"height": 675,
"mimeType": "image/jpeg"
},
"npr-cds-square": {
"file": "https://cdn.kqed.org/wp-content/uploads/sites/10/2026/04/AnthropicGetty-600x600.jpg",
"width": 600,
"height": 600,
"mimeType": "image/jpeg"
},
"kqedFullSize": {
"file": "https://cdn.kqed.org/wp-content/uploads/sites/10/2026/04/AnthropicGetty.jpg",
"width": 2000,
"height": 1330
}
},
"fetchFailed": false,
"isLoading": false
},
"news_12080827": {
"type": "attachments",
"id": "news_12080827",
"meta": {
"index": "attachments_1716263798",
"site": "news",
"id": "12080827",
"found": true
},
"title": "h1blife_webimg",
"publishDate": 1776829952,
"status": "inherit",
"parent": 12080824,
"modified": 1776830020,
"caption": "Illustration from the game H1B.Life shows five deity-like figures — including an “orange god” — presiding over a slot machine labeled “let’s play your life.” H1B.Life is a mobile game that simulates life on an H-1B visa.",
"credit": "Composite by Morgan Sung; game images courtesy of Reality Reload",
"altTag": "Illustrated graphic showing five deity-like characters from the mobile game H1B.Life standing around a colorful slot machine that reads “let’s play your life.” In the middle is a cartoonish “orange god” resembling Donald Trump. He is flanked by other figures including a Statue of Liberty–like character, an alien-like figure, and a chicken-headed figure. The background is a black and white close-up of an H-1B visa document.",
"description": null,
"imgSizes": {
"thumbnail": {
"file": "https://cdn.kqed.org/wp-content/uploads/sites/10/2026/04/h1blife_webimg-160x90.png",
"width": 160,
"height": 90,
"mimeType": "image/png"
},
"1536x1536": {
"file": "https://cdn.kqed.org/wp-content/uploads/sites/10/2026/04/h1blife_webimg-1536x864.png",
"width": 1536,
"height": 864,
"mimeType": "image/png"
},
"post-thumbnail": {
"file": "https://cdn.kqed.org/wp-content/uploads/sites/10/2026/04/h1blife_webimg-672x372.png",
"width": 672,
"height": 372,
"mimeType": "image/png"
},
"twentyfourteen-full-width": {
"file": "https://cdn.kqed.org/wp-content/uploads/sites/10/2026/04/h1blife_webimg-1038x576.png",
"width": 1038,
"height": 576,
"mimeType": "image/png"
},
"npr-cds-wide": {
"file": "https://cdn.kqed.org/wp-content/uploads/sites/10/2026/04/h1blife_webimg-1200x675.png",
"width": 1200,
"height": 675,
"mimeType": "image/png"
},
"npr-cds-square": {
"file": "https://cdn.kqed.org/wp-content/uploads/sites/10/2026/04/h1blife_webimg-600x600.png",
"width": 600,
"height": 600,
"mimeType": "image/png"
},
"kqedFullSize": {
"file": "https://cdn.kqed.org/wp-content/uploads/sites/10/2026/04/h1blife_webimg.png",
"width": 1920,
"height": 1080
}
},
"fetchFailed": false,
"isLoading": false
}
},
"audioPlayerReducer": {
"postId": "stream_live",
"isPaused": true,
"isPlaying": false,
"pfsActive": false,
"pledgeModalIsOpen": true,
"playerDrawerIsOpen": false
},
"authorsReducer": {
"minakim": {
"type": "authors",
"id": "243",
"meta": {
"index": "authors_1716337520",
"id": "243",
"found": true
},
"name": "Mina Kim",
"firstName": "Mina",
"lastName": "Kim",
"slug": "minakim",
"email": "mkim@kqed.org",
"display_author_email": false,
"staff_mastheads": [
"news"
],
"title": "Host, Forum",
"bio": "Mina Kim is host of the 10 a.m. statewide hour of Forum; a live daily talk show for curious Californians on issues that matter to the state and nation, with a particular emphasis on race and equity.\r\n\r\nBefore joining the Forum team, Mina was KQED’s evening news anchor, and health reporter for The California Report. Her award-winning work has included natural disasters in Napa and gun violence in Oakland. Mina grew up in St. John’s, Newfoundland.",
"avatar": "https://secure.gravatar.com/avatar/145ce657a2d08cb86d93686beb958982?s=600&d=blank&r=g",
"twitter": "mkimreporter",
"facebook": null,
"instagram": null,
"linkedin": null,
"sites": [
{
"site": "news",
"roles": [
"contributor"
]
},
{
"site": "stateofhealth",
"roles": [
"author"
]
},
{
"site": "forum",
"roles": [
"editor"
]
}
],
"headData": {
"title": "Mina Kim | KQED",
"description": "Host, Forum",
"ogImgSrc": "https://secure.gravatar.com/avatar/145ce657a2d08cb86d93686beb958982?s=600&d=blank&r=g",
"twImgSrc": "https://secure.gravatar.com/avatar/145ce657a2d08cb86d93686beb958982?s=600&d=blank&r=g"
},
"isLoading": false,
"link": "/author/minakim"
},
"rachael-myrow": {
"type": "authors",
"id": "251",
"meta": {
"index": "authors_1716337520",
"id": "251",
"found": true
},
"name": "Rachael Myrow",
"firstName": "Rachael",
"lastName": "Myrow",
"slug": "rachael-myrow",
"email": "rmyrow@kqed.org",
"display_author_email": true,
"staff_mastheads": [
"news"
],
"title": "Senior Editor of KQED's Silicon Valley News Desk",
"bio": "• I write and edit stories about how Silicon Valley power and policies shape everyday life in California. I’m also passionate about making Bay Area history and culture more accessible to a broad public. • I’ve been a journalist for most of my life, starting in high school with The Franklin Press in Los Angeles, where I grew up. While earning my first degree in English at UC Berkeley, I got my start in public radio at KALX-FM. After completing a second degree in journalism at Cal, I landed my first professional job at Marketplace, then moved on to KPCC (now LAist), and then KQED, where I hosted The California Report for more than seven years. • My reporting has appeared on NPR, The World, WBUR’s \u003ci>Here & Now\u003c/i>, and the BBC. I also guest host for KQED’s \u003ci>Forum\u003c/i>, as well as the Commonwealth Club in San Francisco. • I speak periodically on media, democracy and technology issues, and do voiceover work for documentaries and educational video projects. • Outside of the studio, you'll find me hiking Bay Area trails and whipping up Insta-ready meals in my kitchen. • I do not accept gifts, money, or favors from anyone connected to my reporting, I don't pay people for information, and I do not support or donate to political causes. • I strive to treat the people I report on with fairness, honesty, and respect. I also recognize there are often multiple sides to a story and work to verify information through multiple sources and documentation. If I get something wrong, I correct it.",
"avatar": "https://secure.gravatar.com/avatar/87bf8cb5874e045cdff430523a6d48b1?s=600&d=blank&r=g",
"twitter": "rachaelmyrow",
"facebook": null,
"instagram": null,
"linkedin": "https://www.linkedin.com/in/rachaelmyrow/",
"sites": [
{
"site": "arts",
"roles": [
"administrator"
]
},
{
"site": "news",
"roles": [
"edit_others_posts",
"editor"
]
},
{
"site": "futureofyou",
"roles": [
"editor"
]
},
{
"site": "bayareabites",
"roles": [
"editor"
]
},
{
"site": "stateofhealth",
"roles": [
"editor"
]
},
{
"site": "science",
"roles": [
"editor"
]
},
{
"site": "food",
"roles": [
"editor"
]
},
{
"site": "forum",
"roles": [
"editor"
]
},
{
"site": "liveblog",
"roles": [
"author"
]
}
],
"headData": {
"title": "Rachael Myrow | KQED",
"description": "Senior Editor of KQED's Silicon Valley News Desk",
"ogImgSrc": "https://secure.gravatar.com/avatar/87bf8cb5874e045cdff430523a6d48b1?s=600&d=blank&r=g",
"twImgSrc": "https://secure.gravatar.com/avatar/87bf8cb5874e045cdff430523a6d48b1?s=600&d=blank&r=g"
},
"isLoading": false,
"link": "/author/rachael-myrow"
},
"ccabreralomeli": {
"type": "authors",
"id": "11708",
"meta": {
"index": "authors_1716337520",
"id": "11708",
"found": true
},
"name": "Carlos Cabrera-Lomelí",
"firstName": "Carlos",
"lastName": "Cabrera-Lomelí",
"slug": "ccabreralomeli",
"email": "ccabreralomeli@KQED.org",
"display_author_email": true,
"staff_mastheads": [
"news"
],
"title": "Community Reporter",
"bio": "Carlos Cabrera-Lomelí is a community reporter with KQED's digital engagement team. He also reports and co-produces for KQED's bilingual news hub KQED en Español. He grew up in San Francisco's Mission District and has previously worked with Univision, 48 Hills and REFORMA in Mexico City.",
"avatar": "https://secure.gravatar.com/avatar/e95ff80bb2eaf18a8f2af4dcf7ffb54b?s=600&d=mm&r=g",
"twitter": "@LomeliCabrera",
"facebook": null,
"instagram": null,
"linkedin": null,
"sites": [
{
"site": "arts",
"roles": [
"editor"
]
},
{
"site": "news",
"roles": [
"editor"
]
},
{
"site": "about",
"roles": [
"editor"
]
},
{
"site": "science",
"roles": [
"editor"
]
},
{
"site": "perspectives",
"roles": [
"editor"
]
},
{
"site": "elections",
"roles": [
"editor"
]
},
{
"site": "liveblog",
"roles": [
"contributor"
]
}
],
"headData": {
"title": "Carlos Cabrera-Lomelí | KQED",
"description": "Community Reporter",
"ogImgSrc": "https://secure.gravatar.com/avatar/e95ff80bb2eaf18a8f2af4dcf7ffb54b?s=600&d=mm&r=g",
"twImgSrc": "https://secure.gravatar.com/avatar/e95ff80bb2eaf18a8f2af4dcf7ffb54b?s=600&d=mm&r=g"
},
"isLoading": false,
"link": "/author/ccabreralomeli"
},
"chambrick": {
"type": "authors",
"id": "11832",
"meta": {
"index": "authors_1716337520",
"id": "11832",
"found": true
},
"name": "Chris Hambrick",
"firstName": "Chris",
"lastName": "Hambrick",
"slug": "chambrick",
"email": "chambrick@kqed.org",
"display_author_email": false,
"staff_mastheads": [],
"title": "KQED Contributor",
"bio": null,
"avatar": "https://secure.gravatar.com/avatar/c4a3663ebbd3a21fa35ef06a1236ce8a?s=600&d=blank&r=g",
"twitter": null,
"facebook": null,
"instagram": null,
"linkedin": null,
"sites": [
{
"site": "",
"roles": [
"editor"
]
},
{
"site": "arts",
"roles": [
"editor"
]
},
{
"site": "news",
"roles": [
"editor"
]
},
{
"site": "podcasts",
"roles": [
"editor"
]
}
],
"headData": {
"title": "Chris Hambrick | KQED",
"description": "KQED Contributor",
"ogImgSrc": "https://secure.gravatar.com/avatar/c4a3663ebbd3a21fa35ef06a1236ce8a?s=600&d=blank&r=g",
"twImgSrc": "https://secure.gravatar.com/avatar/c4a3663ebbd3a21fa35ef06a1236ce8a?s=600&d=blank&r=g"
},
"isLoading": false,
"link": "/author/chambrick"
},
"cegusa": {
"type": "authors",
"id": "11869",
"meta": {
"index": "authors_1716337520",
"id": "11869",
"found": true
},
"name": "Chris Egusa",
"firstName": "Chris",
"lastName": "Egusa",
"slug": "cegusa",
"email": "cegusa@kqed.org",
"display_author_email": false,
"staff_mastheads": [],
"title": "KQED Contributor",
"bio": null,
"avatar": "https://secure.gravatar.com/avatar/86d00b34cb7eeb5247e991f0e20c70c4?s=600&d=blank&r=g",
"twitter": null,
"facebook": null,
"instagram": null,
"linkedin": null,
"sites": [
{
"site": "arts",
"roles": [
"editor"
]
}
],
"headData": {
"title": "Chris Egusa | KQED",
"description": "KQED Contributor",
"ogImgSrc": "https://secure.gravatar.com/avatar/86d00b34cb7eeb5247e991f0e20c70c4?s=600&d=blank&r=g",
"twImgSrc": "https://secure.gravatar.com/avatar/86d00b34cb7eeb5247e991f0e20c70c4?s=600&d=blank&r=g"
},
"isLoading": false,
"link": "/author/cegusa"
},
"kdebenedetti": {
"type": "authors",
"id": "11913",
"meta": {
"index": "authors_1716337520",
"id": "11913",
"found": true
},
"name": "Katie DeBenedetti",
"firstName": "Katie",
"lastName": "DeBenedetti",
"slug": "kdebenedetti",
"email": "kdebenedetti@kqed.org",
"display_author_email": false,
"staff_mastheads": [
"news",
"science"
],
"title": "KQED Contributor",
"bio": "Katie DeBenedetti is a digital reporter covering daily news for the Express Desk. Prior to joining KQED as a culture reporting intern in January 2024, she covered education and city government for the Napa Valley Register.",
"avatar": "https://secure.gravatar.com/avatar/6e31073cb8f7e4214ab03f42771d0f45?s=600&d=blank&r=g",
"twitter": null,
"facebook": null,
"instagram": null,
"linkedin": null,
"sites": [
{
"site": "news",
"roles": [
"author"
]
},
{
"site": "science",
"roles": [
"author"
]
},
{
"site": "liveblog",
"roles": [
"author"
]
}
],
"headData": {
"title": "Katie DeBenedetti | KQED",
"description": "KQED Contributor",
"ogImgSrc": "https://secure.gravatar.com/avatar/6e31073cb8f7e4214ab03f42771d0f45?s=600&d=blank&r=g",
"twImgSrc": "https://secure.gravatar.com/avatar/6e31073cb8f7e4214ab03f42771d0f45?s=600&d=blank&r=g"
},
"isLoading": false,
"link": "/author/kdebenedetti"
},
"mcueva": {
"type": "authors",
"id": "11943",
"meta": {
"index": "authors_1716337520",
"id": "11943",
"found": true
},
"name": "Maya Cueva",
"firstName": "Maya",
"lastName": "Cueva",
"slug": "mcueva",
"email": "mcueva@kqed.org",
"display_author_email": false,
"staff_mastheads": [],
"title": "KQED Contributor",
"bio": null,
"avatar": "https://secure.gravatar.com/avatar/26d0967153608e4720f52779f754087a?s=600&d=blank&r=g",
"twitter": null,
"facebook": null,
"instagram": null,
"linkedin": null,
"sites": [
{
"site": "news",
"roles": [
"editor"
]
}
],
"headData": {
"title": "Maya Cueva | KQED",
"description": "KQED Contributor",
"ogImgSrc": "https://secure.gravatar.com/avatar/26d0967153608e4720f52779f754087a?s=600&d=blank&r=g",
"twImgSrc": "https://secure.gravatar.com/avatar/26d0967153608e4720f52779f754087a?s=600&d=blank&r=g"
},
"isLoading": false,
"link": "/author/mcueva"
},
"msung": {
"type": "authors",
"id": "11944",
"meta": {
"index": "authors_1716337520",
"id": "11944",
"found": true
},
"name": "Morgan Sung",
"firstName": "Morgan",
"lastName": "Sung",
"slug": "msung",
"email": "msung@kqed.org",
"display_author_email": false,
"staff_mastheads": [],
"title": "Close All Tabs Host",
"bio": null,
"avatar": "https://secure.gravatar.com/avatar/34033b8d232ee6c987ca6f0a1a28f0e5?s=600&d=blank&r=g",
"twitter": null,
"facebook": null,
"instagram": null,
"linkedin": null,
"sites": [
{
"site": "news",
"roles": [
"editor"
]
}
],
"headData": {
"title": "Morgan Sung | KQED",
"description": "Close All Tabs Host",
"ogImgSrc": "https://secure.gravatar.com/avatar/34033b8d232ee6c987ca6f0a1a28f0e5?s=600&d=blank&r=g",
"twImgSrc": "https://secure.gravatar.com/avatar/34033b8d232ee6c987ca6f0a1a28f0e5?s=600&d=blank&r=g"
},
"isLoading": false,
"link": "/author/msung"
},
"epeppel": {
"type": "authors",
"id": "11989",
"meta": {
"index": "authors_1716337520",
"id": "11989",
"found": true
},
"name": "Eliza Peppel",
"firstName": "Eliza",
"lastName": "Peppel",
"slug": "epeppel",
"email": "epeppel@kqed.org",
"display_author_email": false,
"staff_mastheads": [
"news",
"science",
"arts"
],
"title": null,
"bio": "Eliza is an award-winning journalist living in Oakland. She was previously a reporting fellow at KALW, where she reported daily news and long-form radio features. Eliza studied journalism at Fordham University in The Bronx during the COVID-19 pandemic. She grew up mainly in California and spent a few childhood years in Aix en Provence, France.",
"avatar": "https://secure.gravatar.com/avatar/7fcfcd6fdbaa62c5112d3ec9bc0b9b34?s=600&d=blank&r=g",
"twitter": null,
"bluesky": null,
"facebook": null,
"instagram": null,
"linkedin": null,
"sites": [
{
"site": "news",
"roles": [
"contributor"
]
},
{
"site": "science",
"roles": [
"contributor"
]
}
],
"headData": {
"title": "Eliza Peppel | KQED",
"description": null,
"ogImgSrc": "https://secure.gravatar.com/avatar/7fcfcd6fdbaa62c5112d3ec9bc0b9b34?s=600&d=blank&r=g",
"twImgSrc": "https://secure.gravatar.com/avatar/7fcfcd6fdbaa62c5112d3ec9bc0b9b34?s=600&d=blank&r=g"
},
"isLoading": false,
"link": "/author/epeppel"
}
},
"breakingNewsReducer": {},
"pagesReducer": {},
"postsReducer": {
"stream_live": {
"type": "live",
"id": "stream_live",
"audioUrl": "https://streams.kqed.org/kqedradio",
"title": "Live Stream",
"excerpt": "Live Stream information currently unavailable.",
"link": "/radio",
"featImg": "",
"label": {
"name": "KQED Live",
"link": "/"
}
},
"stream_kqedNewscast": {
"type": "posts",
"id": "stream_kqedNewscast",
"audioUrl": "https://www.kqed.org/.stream/anon/radio/RDnews/newscast.mp3?_=1",
"title": "KQED Newscast",
"featImg": "",
"label": {
"name": "88.5 FM",
"link": "/"
}
},
"news_12082478": {
"type": "posts",
"id": "news_12082478",
"meta": {
"index": "posts_1716263798",
"site": "news",
"id": "12082478",
"score": null,
"sort": [
1778061623000
]
},
"guestAuthors": [],
"slug": "my-therapist-is-a-chatbot-reload",
"title": "My Therapist Is a Chatbot (Reload)",
"publishDate": 1778061623,
"format": "audio",
"headTitle": "My Therapist Is a Chatbot (Reload) | KQED",
"labelTerm": {},
"content": "\u003cp>\u003ca href=\"#episode-transcript\">\u003ci>View the full episode transcript.\u003c/i>\u003c/a>\u003c/p>\n\u003cp>\u003cspan style=\"font-weight: 400\">What happens when your therapist is… a chatbot?\u003c/span>\u003c/p>\n\u003cp>\u003cspan style=\"font-weight: 400\">For KQED health reporter Lesley McClurg, it started with a late-night spiral over dating. Instead of texting a friend, she opened ChatGPT and got the kind of calm, reassuring advice she needed. It worked… maybe a little too well.\u003c/span>\u003c/p>\n\u003cp>\u003cspan style=\"font-weight: 400\">Lesley joins Morgan to dig into the rise of AI therapy, why so many people are turning to chatbots for emotional support, and what they might be risking in the process. These systems promise something traditional mental health care often can’t: instant, affordable, judgment-free access. But there are limits and, sometimes, serious consequences. \u003c/span>\u003c/p>\n\u003cp>\u003cb>Note:\u003c/b>\u003cspan style=\"font-weight: 400\"> This episode includes discussions of suicide and mental health conditions. 
Listener discretion is advised.\u003c/span>\u003c/p>\n\u003cp>\u003ci>\u003cspan style=\"font-weight: 400\">This episode first aired on April 23rd, 2025 \u003c/span>\u003c/i>\u003c/p>\n\u003cp>\u003c!-- iframe plugin v.4.3 wordpress.org/plugins/iframe/ -->\u003cbr>\n\u003ciframe loading=\"lazy\" frameborder=\"0\" height=\"200\" scrolling=\"no\" src=\"https://playlist.megaphone.fm?e=KQINC4726760100\" width=\"100%\" class=\"iframe-class\">\u003c/iframe>\u003c/p>\n\u003cp>\u003cstrong>Guest:\u003c/strong>\u003c/p>\n\u003cul>\n\u003cli>\u003ca href=\"https://www.kqed.org/author/lesleymcclurg\">\u003cspan style=\"font-weight: 400\">Lesley McClurg\u003c/span>\u003c/a>\u003cspan style=\"font-weight: 400\">, \u003cem>KQED\u003c/em> health correspondent\u003c/span>\u003c/li>\n\u003c/ul>\n\u003cp>\u003cb>Further Reading/Listening:\u003c/b>\u003c/p>\n\u003cul>\n\u003cli>\u003ca href=\"https://www.kqed.org/science/1996504/ai-replace-therapist-benefits-risks-unsettling-truths\">\u003cspan style=\"font-weight: 400\">Can AI Replace Your Therapist? 
The Benefits, Risks and Unsettling Truths\u003c/span>\u003c/a>\u003cspan style=\"font-weight: 400\"> — Lesley McClurg, \u003c/span>\u003ci>\u003cspan style=\"font-weight: 400\">KQED\u003c/span>\u003c/i>\u003c/li>\n\u003cli>\u003ci>\u003c/i>\u003ca href=\"https://www.npr.org/sections/shots-health-news/2025/04/07/nx-s1-5351312/artificial-intelligence-mental-health-therapy\">\u003cspan style=\"font-weight: 400\">The AI therapist can see you now\u003c/span>\u003c/a>\u003cspan style=\"font-weight: 400\"> — Katia Riddle, \u003c/span>\u003ci>\u003cspan style=\"font-weight: 400\">NPR\u003c/span>\u003c/i>\u003c/li>\n\u003cli>\u003ca href=\"https://spectrum.ieee.org/woebot\">\u003cspan style=\"font-weight: 400\">Woebot, a Mental-Health Chatbot, Tries Out Generative AI\u003c/span>\u003c/a>\u003cspan style=\"font-weight: 400\"> — Casey Sackett, Devin Harper, and Aaron Pavez, \u003c/span>\u003ci>\u003cspan style=\"font-weight: 400\">IEEE Spectrum\u003c/span>\u003c/i>\u003ci>\u003c/i>\u003c/li>\n\u003cli>\u003ca href=\"https://www.kqed.org/news/12057327/ai-prophets-and-spiritual-delusions\">AI Prophets and Spiritual Delusions\u003c/a> — \u003ci>Close All Tabs\u003c/i>\u003c/li>\n\u003cli>\u003ca href=\"https://www.psychologytoday.com/us/blog/urban-survival/202510/new-studies-reveal-mental-health-blindspots-of-ai-chatbots\">New Studies Reveal Mental Health Blindspots of AI Chatbots\u003c/a> — Marlynn Wei, \u003ci>Psychology Today\u003c/i>\u003c/li>\n\u003cli>\u003ca href=\"https://www.npr.org/2026/04/07/nx-s1-5771707/mental-health-care-workforce-artificial-intelligence-ai\">AI in the mental health care workforce is met with fear, pushback — and enthusiasm\u003c/a> — Rhitu Chatterjee, \u003ci>NPR\u003c/i>\u003c/li>\n\u003c/ul>\n\u003cp>Want to give us feedback on the show? 
Shoot us an email at \u003ca href=\"mailto:CloseAllTabs@KQED.org\">CloseAllTabs@KQED.org\u003c/a>\u003c/p>\n\u003cp>\u003cspan style=\"font-weight: 400\">Follow us on\u003c/span>\u003ca href=\"https://www.instagram.com/closealltabspod/\"> \u003cspan style=\"font-weight: 400\">Instagram\u003c/span>\u003c/a>\u003cspan style=\"font-weight: 400\"> and\u003c/span>\u003ca href=\"https://www.tiktok.com/@closealltabs\"> \u003cspan style=\"font-weight: 400\">TikTok\u003c/span>\u003c/a>\u003c/p>\n\u003cp>[ad fullwidth]\u003c/p>\n\u003ch2 id=\"episode-transcript\">Episode Transcript\u003c/h2>\n\u003cp>\u003ci>\u003cspan style=\"font-weight: 400\">This is a computer-generated transcript. While our team has reviewed it, there may be errors.\u003c/span>\u003c/i>\u003c/p>\n\u003cp>\u003cb>Morgan Sung: \u003c/b>\u003cspan style=\"font-weight: 400\">Hey! You’re listening to Close All Tabs, and I’m Morgan Sung. \u003c/span>\u003cspan style=\"font-weight: 400\">May is Mental Health Awareness Month. With the proliferation of AI tools over the last few years, many people have turned to chatbots for companionship, advice … and even therapy. \u003c/span>\u003cspan style=\"font-weight: 400\">It makes sense — healthcare in the US can be completely inaccessible, especially when it comes to mental health treatment. But in some cases, AI chatbots can put very vulnerable users in danger.\u003c/span>\u003cspan style=\"font-weight: 400\"> In light of that, mental health has been at the forefront of conversations about AI use. \u003c/span>\u003cspan style=\"font-weight: 400\">So today, for Mental Health Awareness Month, we’re re-airing an episode that explores that exact topic. A quick heads up: this episode includes discussion of suicide and mental health conditions, which may be distressing for some listeners. 
If you or someone you know needs support, we’ll have links to resources in the episode description.\u003c/span>\u003c/p>\n\u003cp>\u003cb>Lesley McClurg: \u003c/b>\u003cspan style=\"font-weight: 400\">So, I was going through a divorce and started dating after my divorce and hadn’t dated in many years and came home after a date one night and was just really anxious and kind of disheveled and needed some advice. \u003c/span>\u003c/p>\n\u003cp>\u003cb>Morgan Sung: \u003c/b>\u003cspan style=\"font-weight: 400\">This is KQED health reporter Lesley McClurg. \u003c/span>\u003c/p>\n\u003cp>\u003cb>Lesley McClurg: \u003c/b>\u003cspan style=\"font-weight: 400\">It was late at night and I had used ChatGPT for, you know, other things and found it pretty helpful and I thought, what about for this moment in my life? And so I asked Chat whether or not I should reach out to this person that I had just dated because I was feeling like the night hadn’t gone that well.\u003c/span>\u003c/p>\n\u003cp>\u003cb>Morgan Sung: \u003c/b>\u003cspan style=\"font-weight: 400\">It was late at night. She didn’t want to bug a friend about this, and really, she was feeling pretty vulnerable. She didn’t want to be judged. And so, ChatGPT was right there, ready to cheerfully answer her questions. \u003c/span>\u003c/p>\n\u003cp>\u003cb>Lesley McClurg: \u003c/b>\u003cspan style=\"font-weight: 400\">I was surprised that it was so good. I just remember after, you know, a few back and forths, I realized that really I was just nervous, really I just needed to take a deep breath. Basically I had created a big storm in my head. And Chat basically was like, “hey, chill, relax, it could have gone well. There’s another way this could have played out, not the sort of devastating reality that you’re playing out right now. Maybe give it a day or two and then reach out.” And so in that moment, it just sort of helped me take the gas off and come back into myself. 
\u003c/span>\u003c/p>\n\u003cp>\u003cb>Morgan Sung: \u003c/b>\u003cspan style=\"font-weight: 400\">It was exactly what she needed to hear at the time. \u003c/span>\u003c/p>\n\u003cp>\u003cb>Lesley McClurg: \u003c/b>\u003cspan style=\"font-weight: 400\">I didn’t text the person, which was the right call, and kind of used it as I warmed myself back up into the dating world, and it was really helpful. And so it made me then, as a reporter, start asking, “should I be telling this thing all about my love life? Is this a good idea, privacy-wise, et cetera?” And so that’s where it sort of seeded my reporting going forward. \u003c/span>\u003c/p>\n\u003cp>\u003cb>Morgan Sung: \u003c/b>\u003cspan style=\"font-weight: 400\">Lesley isn’t the only one turning to ChatGPT for therapy. If you’ve ever dealt with any health insurance company, you’re probably familiar with the hassle of getting care. And mental health care is especially inaccessible. AI chatbots though, they’re convenient, cost little to nothing to use, and in Lesley’s case, can actually be pretty helpful. But a lot of people are also wary of turning to AI for therapy, can you trust it? What are you risking when you share your most vulnerable thoughts with a chatbot? \u003c/span>\u003c/p>\n\u003cp>\u003cb>Morgan Sung: \u003c/b>\u003cspan style=\"font-weight: 400\">This is Close All Tabs. I’m Morgan Sung, tech journalist and your chronically online friend, here to open as many browser tabs as it takes to help you understand how the digital world affects our real lives. Let’s get into it. \u003c/span>\u003c/p>\n\u003cp>\u003cspan style=\"font-weight: 400\">Access to actual mental health resources has become so limited. Cost and insurance aside, there’s a shortage of licensed human mental health professionals across the country. But can AI therapy really replace actual therapists? Okay, new tab. Does AI therapy work? 
\u003c/span>\u003c/p>\n\u003cp>\u003cb>Morgan Sung: \u003c/b>\u003cspan style=\"font-weight: 400\">Over the course of your reporting, did you meet anyone who actually used an AI chatbot for therapy? \u003c/span>\u003c/p>\n\u003cp>\u003cb>Lesley McClurg: \u003c/b>\u003cspan style=\"font-weight: 400\">I actually talked to quite a few people who used AI therapy and I went online and read a lot of Reddit threads because this is quite the popular topic. I heard more positive stories than negatives. As a reporter, I wanted to illustrate someone who kind of had a nuanced experience, you know, good and bad. \u003c/span>\u003c/p>\n\u003cp>\u003cb>Morgan Sung: \u003c/b>\u003cspan style=\"font-weight: 400\">So, Lesley found a woman named Lilly Payne:. \u003c/span>\u003c/p>\n\u003cp>\u003cb>Lesley McClurg: \u003c/b>\u003cspan style=\"font-weight: 400\">She had kind of the ideal story to illustrate that, yes, it helped her, but it wasn’t ideal. And so that was sort of like the character that I ended up, you know, focusing on. \u003c/span>\u003c/p>\n\u003cp>\u003cb>Morgan Sung: \u003c/b>\u003cspan style=\"font-weight: 400\">In your story, you mentioned that Lilly had turned to AI therapy um during the COVID lockdowns, which were a terrible time for a lot of us. But Lilly wasn’t just experiencing, you know, anxiety and depression and loneliness. Her situation was a little more complicated, right? Can you talk about that? \u003c/span>\u003c/p>\n\u003cp>\u003cb>Lesley McClurg: \u003c/b>\u003cspan style=\"font-weight: 400\">Yeah, I mean in her words, her life basically fell apart. She graduated from college, she had moved to New York City to pursue an arts career, was very excited. And if we can remember, you know, New York was sort of the epicenter of the early days of COVID. It was really bad. Lockdown was really scary and the hospitals were overflowing and it was not a good scene. 
And so she left her arts career, abandoned her dreams and moved back home, which was pretty painful, to her parents’ home in Kentucky. And she is sort of tucked away, and it just felt like a big failure. And she was really struggling with like, what’s next for my life? Where do I go from here? \u003c/span>\u003c/p>\n\u003cp>\u003cb>Lilly Payne: \u003c/b>\u003cspan style=\"font-weight: 400\">It was such a lonely time for so many people. \u003c/span>\u003c/p>\n\u003cp>\u003cb>Morgan Sung: \u003c/b>\u003cspan style=\"font-weight: 400\">This is Lilly. \u003c/span>\u003c/p>\n\u003cp>\u003cb>Lilly Payne: \u003c/b>\u003cspan style=\"font-weight: 400\">I was not at a breaking point, but I wasn’t doing awesome. So I was like, “the more help, the better.” \u003c/span>\u003c/p>\n\u003cp>\u003cb>Lesley McClurg: \u003c/b>\u003cspan style=\"font-weight: 400\">And so in all of that anxiety, she, you know, initially reached out and leaned on a lot of friends, but eventually she felt like she’d kind of worn those supports thin. And so she read about Woebot, this AI therapy platform in a health newsletter. \u003c/span>\u003c/p>\n\u003cp>\u003cb>Lilly Payne: \u003c/b>\u003cspan style=\"font-weight: 400\">So, I gave it a shot because I was like, why not? Everyone’s cooped up in their house. I will talk to this robot. \u003c/span>\u003c/p>\n\u003cp>\u003cb>Lesley McClurg: \u003c/b>\u003cspan style=\"font-weight: 400\">Initially it was really helpful. It did help her calm herself. I think she said she, you know, even just having it in her pocket helped her feel more in control in her life. I think she relied on it quite a bit in those early days to kind of find her ground again and be able to focus on, you know, re-imagining a new life from there while she was back at home with her parents in Kentucky. 
\u003c/span>\u003c/p>\n\u003cp>\u003cb>Morgan Sung: \u003c/b>\u003cspan style=\"font-weight: 400\">It’s worth noting that Woebot is a therapy-specific AI chatbot, and it doesn’t use generative AI to respond to users the way that other tools like ChatGPT, or Claude, or DeepSeek do. This means that its interactions with users are a bit more predictable. It’s also engineered to respond the way that a therapist might. So instead of immediately jumping into offering advice, Woebot asks specific questions to encourage users to reflect and do the inner work themselves. \u003c/span>\u003c/p>\n\u003cp>\u003cb>Lesley McClurg: \u003c/b>\u003cspan style=\"font-weight: 400\">Well, it was designed by a psychologist. And so, you know, from that perspective, it it really is designed to focus on your mental health. The goal of Woebot is, you know, as a mental health tool, as a wellness tool, I think is how they market themselves. \u003c/span>\u003c/p>\n\u003cp>\u003cb>Morgan Sung: \u003c/b>\u003cspan style=\"font-weight: 400\">Woebot is designed to use a set of techniques called cognitive behavioral therapy. \u003c/span>\u003c/p>\n\u003cp>\u003cb>Lesley McClurg: \u003c/b>\u003cspan style=\"font-weight: 400\">You know, cognitive behavioral therapy helps you reframe your negative thoughts using specific exercises. And, you know, I think as any CBT, which is what it’s acronym is, it feels a little forced, but she did say it did help her reframe those negative thoughts and that she was able to think more more positively. \u003c/span>\u003c/p>\n\u003cp>\u003cb>Morgan Sung: \u003c/b>\u003cspan style=\"font-weight: 400\">Yeah. Can you talk about uh Lilly’s uh other diagnosis that maybe complicated this form of treatment? \u003c/span>\u003c/p>\n\u003cp>\u003cb>Lesley McClurg: \u003c/b>\u003cspan style=\"font-weight: 400\">She has obsessive-compulsive disorder, and sometimes that makes her fixate on worst-case scenarios. 
\u003c/span>\u003c/p>\n\u003cp>\u003cb>Lilly Payne: \u003c/b>\u003cspan style=\"font-weight: 400\">Most of the time when people think about OCD they think of, just the very cliche like, “oh, you can’t stop washing your hands, you’re afraid of germs.” While that is a very real subtype that people experience, typically OCD like manifests in really taboo intrusive thoughts, and then the physical compulsions stem from trying to keep those themes away. And so, logically, you can know that, like, this doesn’t make sense, it’s not actually happening, but it just, it, it’s not just in your head, like physically it feels so real. \u003c/span>\u003c/p>\n\u003cp>\u003cb>Morgan Sung: \u003c/b>\u003cspan style=\"font-weight: 400\">Lilly is also diagnosed with anxiety and depression. \u003c/span>\u003c/p>\n\u003cp>\u003cb>Lesley McClurg: \u003c/b>\u003cspan style=\"font-weight: 400\">A symptom of depression is suicide ideation eventually, right? So she fixated on the idea that eventually because of her depression, that she may think about killing herself. \u003c/span>\u003c/p>\n\u003cp>\u003cb>Lilly Payne: \u003c/b>\u003cspan style=\"font-weight: 400\">My brain would be like, “Oh, you’ve struggled with depression in the past. There’s no saying that one day you won’t want to go through with suicide.”\u003c/span>\u003c/p>\n\u003cp>\u003cb>Lesley McClurg: \u003c/b>\u003cspan style=\"font-weight: 400\">And so she mentioned that she was worried about suicide in a session with Woebot. And Woebot came back and had a crisis alert and said, “hey, you better call the suicide hotline.” And she said, “no, no no, wait a second.”\u003c/span>\u003c/p>\n\u003cp>\u003cb>Lilly Payne: \u003c/b>\u003cspan style=\"font-weight: 400\">I’m not experiencing suicidal inclinations, I’m just terrified that I will. 
\u003c/span>\u003c/p>\n\u003cp>\u003cb>Lesley McClurg: \u003c/b>\u003cspan style=\"font-weight: 400\">And luckily she knew that, she understood her disorder enough to know that nuance and to know what was happening in her brain because she had done so much previous therapy. But she said, you know, if she hadn’t really understood her disease, having that crisis alert come up may have even added more stress. \u003c/span>\u003c/p>\n\u003cp>\u003cb>Lilly Payne: \u003c/b>\u003cspan style=\"font-weight: 400\">I would have freaked out and been like, “oh my gosh, this this thing that is supposed to have this mental health knowledge thinks that I am suicidal. I must be suicidal, I must be a danger to myself.” \u003c/span>\u003c/p>\n\u003cp>\u003cb>Lesley McClurg: \u003c/b>\u003cspan style=\"font-weight: 400\">So, you know, in defense of Woebot, they came back and said, “hey, we’re not, you know, specifically targeting or for people who have OCD. We really are just a wellness tool. “But her story illustrates where AI doesn’t necessarily have the nuance, the understanding — that a human, like a human therapist would have picked up on that. They would have understood that she had OCD and really understood the nuances of that, whereas in this case, Woebot didn’t. \u003c/span>\u003c/p>\n\u003cp>\u003cb>Morgan Sung: \u003c/b>\u003cspan style=\"font-weight: 400\">Right. It sounds like Wobot was inadvertently validating this intrusive thought that she was having because she has OCD. And when you’re really depressed or anxious, it might be helpful for your feelings to be validated like that. But how does that compare to the recommended treatment for OCD? \u003c/span>\u003c/p>\n\u003cp>\u003cb>Lesley McClurg: \u003c/b>\u003cspan style=\"font-weight: 400\">I mean the recommended treatment for OCD is generally exposure therapy. So you expose yourself to whatever you’re scared of. 
And so in this case, a therapist would work with her in terms of exposing herself to those ideas, probably walk her through, you know, reality, et cetera, in a way that allows her to lean into her fears so that they’re not as scary and sort of wound up and keep going. And sort of overtake her. Whereas you, like a therapist wouldn’t stand up with a red flag and say, “Oh my God, you really are suicidal. Therefore you should call a hotline.” Right? Which is basically what Woebot did. Yeah.\u003c/span>\u003c/p>\n\u003cp>\u003cb>Morgan Sung: \u003c/b>\u003cspan style=\"font-weight: 400\">Lilly’s case is just one example of the limits of AI therapy. Responding with a crisis alert wasn’t helpful for her specific needs, but it’s probably good that Woebot even has those guardrails in place. But what happens when AI chatbots go off script? How bad can it get? We’ll get into that when we come back. \u003c/span>\u003c/p>\n\u003cp>\u003cspan style=\"font-weight: 400\">New tab. AI therapy … worst case scenarios. \u003c/span>\u003c/p>\n\u003cp>\u003cspan style=\"font-weight: 400\">So Woebot can’t necessarily respond with the nuance of an actual human therapist. But it seems like it wasn’t giving Lilly bad advice. Um but let’s talk about examples of AI therapy doing the exact opposite of what it’s supposed to do. What happened with the National Eating Disorder Association hotline? \u003c/span>\u003c/p>\n\u003cp>\u003cb>Lesley McClurg: \u003c/b>\u003cspan style=\"font-weight: 400\">Yeah, that didn’t play out very well. They created a bot named Tessa and some of the users found that Tessa was giving them dieting advice. So these are folks \u003c/span>\u003c/p>\n\u003cp>\u003cb>Morgan Sung: \u003c/b>\u003cspan style=\"font-weight: 400\">Oh god. 
\u003c/span>\u003c/p>\n\u003cp>\u003cb>Lesley McClurg: \u003c/b>\u003cspan style=\"font-weight: 400\">Who have, you know, anorexia, bulimia, and somehow Tessa’s wires got crossed and people were getting the exact advice that would be really dangerous for their eating disorders. \u003c/span>\u003c/p>\n\u003cp>\u003cb>Sharon Maxwell: \u003c/b>\u003cspan style=\"font-weight: 400\">The recommendations that Tessa gave me was that I could lose one to two pounds per week, that I should eat no more than 2,000 calories in a day, that I should have a calorie deficit of 500 to 1,000 calories per day. All of which might sound benign to the general listener, however, to an individual with an eating disorder, the focus of weight loss really fuels the eating disorder. \u003c/span>\u003c/p>\n\u003cp>\u003cb>Morgan Sung: \u003c/b>\u003cspan style=\"font-weight: 400\">That was Sharon Maxwell, an eating disorder recovery educator, speaking to NPR about her experience with Tessa. \u003c/span>\u003c/p>\n\u003cp>\u003cb>Lesley McClurg: \u003c/b>\u003cspan style=\"font-weight: 400\">So, NEDA, the National Eating Disorder Association, you know, pulled Tessa down and said, “this isn’t working very well.”\u003c/span>\u003c/p>\n\u003cp>\u003cb>Morgan Sung: \u003c/b>\u003cspan style=\"font-weight: 400\">And it sounds like they just didn’t have that kind of guardrail in place. Like they didn’t anticipate that. Um, so even if Lilly didn’t really need Woebot to immediately jump into crisis mode, at least it had that guardrail to say, like, “hey, crisis.” But in the past, other AI chatbots have gotten into serious trouble for not responding to users’ red flags and just validating their responses. \u003c/span>\u003c/p>\n\u003cp>\u003cspan style=\"font-weight: 400\">And that happened in the case of Character AI, this AI app that lets users personalize an AI companion based on fictional characters, celebrities, historical figures, all that. 
Until a recent lawsuit, Character AI did not have any safety measures or disclaimers warning users that they weren’t talking to a real person. What led to this lawsuit? \u003c/span>\u003c/p>\n\u003cp>\u003cb>Lesley McClurg: \u003c/b>\u003cspan style=\"font-weight: 400\">Yeah, there was a 14-year-old who grew really attached to his character that he had created. Like you said, Character AI lets you create a character and then interact with that character. And, you know, not surprisingly, kind of like I did in my first experience with ChatGPT, it feels so good that you develop a little bit of an emotional connection. \u003c/span>\u003c/p>\n\u003cp>\u003cspan style=\"font-weight: 400\">And so this 14-year-old did that over the course of several months. And then he started opening up about some of the distress that he was feeling. And the character, instead of steering, you know, this 14-year-old towards help, unfortunately the bot allegedly reinforced some suicidal thoughts and eventually the boy ended up taking his life. And so the lawsuit, \u003c/span>\u003c/p>\n\u003cp>\u003cb>Morgan Sung: \u003c/b>\u003cspan style=\"font-weight: 400\">That’s terrible. \u003c/span>\u003c/p>\n\u003cp>\u003cb>Lesley McClurg: \u003c/b>\u003cspan style=\"font-weight: 400\">Exactly, it was really kind of horrific and it’s not the only one like this. There’s only a handful at this point, but it really is raising the red flag that these very empathetic responses are sort of like, you know parroting back, which is, again, what some AI does. Uh it can play out really, really poorly. \u003c/span>\u003c/p>\n\u003cp>\u003cstrong>Morgan Sung:\u003c/strong> In January 2026, Character.AI agreed to settle multiple lawsuits that alleged that the chatbot contributed to mental health issues among teenagers. 
Other companies have faced similar lawsuits, after several users died by suicide, allegedly at the encouragement of chatbots.\u003c/p>\n\u003cp>\u003cspan style=\"font-weight: 400\">So what happened with the eating disorder hotline and Character AI, those are pretty extreme cases. Will most people actually experience those worst case scenarios? In your research, did you find anything about that? Or is it just like, are these just edge cases? \u003c/span>\u003c/p>\n\u003cp>\u003cb>Lesley McClurg: \u003c/b>\u003cspan style=\"font-weight: 400\">I mean we don’t have numbers yet. I think it’s really early in the arc of this technology. I think the experts are most worried about platforms that are like Character AI, where you are building a relationship with a character. In their defense, they’re not built as mental health tools, right? These are not marketing themselves as mental health tools. They are, you know, marketing themselves as, “hey, here, we’re going to give you a friend.” \u003c/span>\u003c/p>\n\u003cp>\u003cspan style=\"font-weight: 400\">Yet, you know, like a friend, like you and I probably do with our friends, we lean on our friends. We talk to our friends. We build emotional connections with our friends. We trust our friends for the right advice, right? And these are robots. So that relationship is not uh, you know, built on human connection. And like we can see it can go wrong. \u003c/span>\u003c/p>\n\u003cp>\u003cb>Morgan Sung: \u003c/b>\u003cspan style=\"font-weight: 400\">Another concern that I have, you know, as a tech reporter is uh privacy. ChatGPT, for example, isn’t HIPAA compliant. Could you explain what HIPPA is and why it’s necessary with medical information? \u003c/span>\u003c/p>\n\u003cp>\u003cb>Lesley McClurg: \u003c/b>\u003cspan style=\"font-weight: 400\">Yeah, I mean HIPAA is the regulation that keeps all of our data safe. 
So when you go to the doctor, a doctor is required to keep all of your medical information, you know, totally private. It’s not going to be given anywhere. It’s not going to leak away. That is the privacy regulations. Now, some of these platforms, you know, for example, like Woebot, uh Rosebud is one, which is a platform that’s more like a journaling service. Uh you know, they say they’re HIPAA compliant, but there’s no one regulating them. It’s not like the American Medical Association is regulating them. \u003c/span>\u003c/p>\n\u003cp>\u003cspan style=\"font-weight: 400\">So, that data, you don’t really know where it’s going. You’re trusting these companies who are profit driven. You know, I mean, hopefully Woebot and Rosebud, you know, are following their own promises to their consumers. But there might be other companies that, you know, definitely ChatGPT is not, you know, promising that they’re HIPAA compliant. And, you know, that information is being used, is being put out there to retrain the model. And so, you know, hopefully they’re not gonna sell your data to advertisers. \u003c/span>\u003c/p>\n\u003cp>\u003cspan style=\"font-weight: 400\">You know, also, I mean, the kind of a worst-case scenario, this fortunately hasn’t happened yet, but, you know, what if your mental health information gets out there, an insurance company gets wind of that, and your premiums start going up because they know that you’re struggling with something. \u003c/span>\u003c/p>\n\u003cp>\u003cb>Morgan Sung: \u003c/b>\u003cspan style=\"font-weight: 400\">Oh wow. \u003c/span>\u003c/p>\n\u003cp>\u003cb>Lesley McClurg: \u003c/b>\u003cspan style=\"font-weight: 400\">So, you know, again, that hasn’t happen yet. Those are sort of like the worst- case scenarios. \u003c/span>\u003c/p>\n\u003cp>\u003cb>Morgan Sung: \u003c/b>\u003cspan style=\"font-weight: 400\">But again, worst-case scenarios. Right. 
\u003c/span>\u003c/p>\n\u003cp>\u003cb>Lesley McClurg: \u003c/b>\u003cspan style=\"font-weight: 400\">Exactly. \u003c/span>\u003c/p>\n\u003cp>\u003cb>Morgan Sung: \u003c/b>\u003cspan style=\"font-weight: 400\">Yeah. Obviously, the priority of pretty much any for profit company is to monetize. But, do AI companies have any incentive to improve as more people turn to their products for therapy, even if they aren’t necessarily mental health specific chatbots? Um, you know, are there better safety measures, more transparency about data collection, especially given the Character AI lawsuit? \u003c/span>\u003c/p>\n\u003cp>\u003cb>Lesley McClurg: \u003c/b>\u003cspan style=\"font-weight: 400\">I think they have that incentive. They also have the incentive to keep you hooked. So I think that’s the sort of like fine line. We’ve seen that with all social media, right? They’re getting a lot better at keeping our attention. AI companies have the same needs and incentives to keep people coming back. And so, you know, I think it’s gonna be a gray area and it’s going to be, unfortunately, like the social media companies, it’s gonna be really up to the creators of these products on whether or not they’re gonna have a really ethical orientation. \u003c/span>\u003c/p>\n\u003cp>\u003cb>Morgan Sung: \u003c/b>\u003cspan style=\"font-weight: 400\">Despite all of these issues, therapy is so inaccessible that unfortunately, AI chatbots might feel like the only immediate tool that people have when seeking treatment. How did we get here? Let’s open one last tab. The mental healthcare crisis. \u003c/span>\u003c/p>\n\u003cp>\u003cb>Morgan Sung: \u003c/b>\u003cspan style=\"font-weight: 400\">You had mentioned earlier the state of mental health care. Why is it so hard to see a therapist? 
\u003c/span>\u003c/p>\n\u003cp>\u003cb>Lesley McClurg: \u003c/b>\u003cspan style=\"font-weight: 400\">Yeah, I mean, the demand for mental health services is really at an all-time high, and it’s surged even more, you know, since the pandemic began and continues to do so. I think there’s something like one in five Americans have some kind of a mental health issue, and yet they have a significant barrier to getting to a therapist. You know, I think it’s 55% of counties, people don’t have access to a psychotherapist or a social worker or a psychologist. They’re just aren’t any in that area. \u003c/span>\u003c/p>\n\u003cp>\u003cspan style=\"font-weight: 400\">And so, you know, I think because of this issue that these sort of mental health deserts, AI is a kind of natural fill-in. You know, It’s available 24-7. You don’t need insurance to get there. You don’t have a high deductible. Uh, you don’t have to prove to anyone that, you know, that you have a mental health condition. \u003c/span>\u003c/p>\n\u003cp>\u003cb>Morgan Sung: \u003c/b>\u003cspan style=\"font-weight: 400\">Yeah. \u003c/span>\u003c/p>\n\u003cp>\u003cb>Lesley McClurg: \u003c/b>\u003cspan style=\"font-weight: 400\">You don’t get accepted. Uh, so it’s easy and accessible. And I think that it will mean that more and more people are going to use this and hopefully, they’ll be well-informed consumers. \u003c/span>\u003c/p>\n\u003cp>\u003cb>Morgan Sung: \u003c/b>\u003cspan style=\"font-weight: 400\">Yeah. You know, given the shortage of providers, and like you mentioned, insurance issues. Um, since the pandemic started, telehealth therapy has become pretty popular. But I’ve seen a lot of complaints about these kind of quick, one-size-fits-all mental health care platforms like BetterHelp, which matches users with Licensed Therapist or Cerebral, which sets users up with a psychiatrist that can prescribe medications like antidepressants or ADHD meds. 
And both of these services were created to, kind of fill this void that you’re talking about, but at the same time, they’re kind of plagued with their own issues. It seems like making therapy quick and accessible isn’t always as easy as it seems. What do you think? \u003c/span>\u003c/p>\n\u003cp>\u003cb>Lesley McClurg: \u003c/b>\u003cspan style=\"font-weight: 400\">I think there’s absolutely a role for telehealth. I think there’s absolutely a role for AI therapy. I think anyone would probably say that having a really heartfelt connection with a therapist in an office, live human, feels different than if you are talking to a screen. And the emotional repair that can happen in that session with a live human I think is different and potentially more profound than with a robot. That might change over time. You know, I don’t know how good these things are going to get. They already feel a little bit too good for my own comfort. \u003c/span>\u003c/p>\n\u003cp>\u003cb>Morgan Sung: \u003c/b>\u003cspan style=\"font-weight: 400\">Yeah. \u003c/span>\u003c/p>\n\u003cp>\u003cb>Lesley McClurg: \u003c/b>\u003cspan style=\"font-weight: 400\">Uh, but they might, they might get even better. You know, same thing, I think the telehealth model at this point is pretty early. I think that they are still refining how well those things work. I think it’s similar with AI therapy. And, you know, I think the, the tricky thing here as well with any of these technological solutions is that we are also living in a pretty isolated way in our lives right now. If you’re taking even like your therapy to a computer, that’s one less human that you’re interacting with. And maybe you’re, you know, mental health issues are because you’re dealing with isolation, with estrangement, with disconnection. Those feelings might even become more escalated if you’re, you know, using telehealth or using AI therapy. 
\u003c/span>\u003c/p>\n\u003cp>\u003cspan style=\"font-weight: 400\">I saw both responses and reflections when I was reading these Reddit threads, you know, from people who were in rural places. I knew that they were feeling more isolated using an AI therapy and others who said, “you know, it was a godsend because I was so alone, at least someone was listening to me. ” \u003c/span>\u003c/p>\n\u003cp>\u003cb>Morgan Sung: \u003c/b>\u003cspan style=\"font-weight: 400\">For your story on AI therapy, you talked to a bunch of psychologists and, you know, real-life human psychologists, um and, you know, someone from the American Psychological Association. Are human therapists concerned about being replaced by AI? \u003c/span>\u003c/p>\n\u003cp>\u003cb>Lesley McClurg: \u003c/b>\u003cspan style=\"font-weight: 400\">I don’t hear that from them yet. Number one, they’re still really in high demand. \u003c/span>\u003c/p>\n\u003cp>\u003cb>Lesley McClurg: \u003c/b>\u003cspan style=\"font-weight: 400\">So I don’t think they’re feeling that crunch yet. \u003c/span>\u003c/p>\n\u003cp>\u003cb>Morgan Sung: \u003c/b>\u003cspan style=\"font-weight: 400\">There’s still a shortage, right? \u003c/span>\u003c/p>\n\u003cp>\u003cb>Lesley McClurg: \u003c/b>\u003cspan style=\"font-weight: 400\">There’s still a huge shortage. And I think, they’re, they’re fairly confident that what they offer is different than what AI therapy offers. And, you know, they can pick up on subtle cues that AI, you know, can’t, say like body language or, you know, pace of speech. These things can reveal a lot about our mental health state, and AI can’t pick up on that stuff. So, and in the deeper bonds, the deeper attachment work that you might do in therapy, I think therapists are quite confident that they’re still better at that. Uh, so in this moment, I would say they’re not, they’re not especially worried. 
\u003c/span>\u003c/p>\n\u003cp>\u003cb>Morgan Sung: \u003c/b>\u003cspan style=\"font-weight: 400\">We’ve talked about the downsides of AI therapy pretty extensively. Um but, you had mentioned like that they can kind of be a tool in a bigger treatment plan while also seeing a real therapist. If someone is going to use AI therapy, how should they approach it? \u003c/span>\u003c/p>\n\u003cp>\u003cb>Lesley McClurg: \u003c/b>\u003cspan style=\"font-weight: 400\">Yeah. Yeah. I think, that’s the message I hope comes across in my reporting, is that, you know, there are these worst case scenarios. Again, I think that the consumer should be educated on how their data is going to be used and understand how the company operates so that they’re not sharing uh really vulnerable information. But I think as a sort of, you know, addition to your yoga, your meditation, your, uh, you know, walks in nature, I think AI can really be a self-regulation tool. And I think it can be used quite well. \u003c/span>\u003c/p>\n\u003cp>\u003cspan style=\"font-weight: 400\">You know, I talked to one company, Rosebud, which is a kind of journaling platform, which it asks you questions to kind of inspire you to express whatever’s going on and help you reflect. And it can follow a thread. So if you mentioned something two weeks ago about your relationship and what was going wrong, it will check in with you about what is happening and help you make sense of that. And I was on it. You know, I’m not a huge pen and paper person. You know, I don’t write anything anymore, so my arm hurts really quickly. And so, I enjoy, you know, I like just would pick up my phone and I would journal just, you know, talking to it and it would ask me questions and it felt, you know, fairly similar to a conversation with a friend. And I would always feel quite a bit better afterwards. 
\u003c/span>\u003c/p>\n\u003cp>\u003cspan style=\"font-weight: 400\">So, in that sense, I think it can be quite helpful because, you know, maybe I’m in therapy once a week, but I’m having a panic attack on Monday night and my, you know, appointment is not until Thursday. I think in that sense, you know, it’s four o’clock in the morning. I can’t call a human therapist no matter what, even if I do have one. You know, to sit down and have the opportunity to have something that’s engaging me, um, I think can be really helpful. \u003c/span>\u003c/p>\n\u003cp>\u003cb>Morgan Sung: \u003c/b>\u003cspan style=\"font-weight: 400\">I’m really curious, since you started reporting on this story, have you used ChatGPT, uh, not necessarily as a therapist, but you know, as this kind of mental health tool that you’re talking about since? \u003c/span>\u003c/p>\n\u003cp>\u003cb>Lesley McClurg: \u003c/b>\u003cspan style=\"font-weight: 400\">I wish I had the positive spin to be like, “yes, I’m relying on it all the time.” You know, I didn’t and I don’t. Um, I felt a little bit like one more thing to do. \u003c/span>\u003c/p>\n\u003cp>\u003cb>Morgan Sung: \u003c/b>\u003cspan style=\"font-weight: 400\">Right. \u003c/span>\u003c/p>\n\u003cp>\u003cb>Lesley McClurg: \u003c/b>\u003cspan style=\"font-weight: 400\">And I felt similarly, you know, we talked about Lilly at the beginning of the story, and the reason that she stopped using Woebot was not because, you know, it had the crisis alert or it sort of like poorly dealt with her OCD, she got tired of being on her phone. She was like, “I didn’t want to be on my phone anymore. I wanted to talk to someone.”. \u003c/span>\u003c/p>\n\u003cp>\u003cb>Morgan Sung: \u003c/b>\u003cspan style=\"font-weight: 400\">Yeah. \u003c/span>\u003c/p>\n\u003cp>\u003cb>Lesley McClurg: \u003c/b>\u003cspan style=\"font-weight: 400\">And I feel that. 
You know, that was, that was, kind of my reasoning, you know, because of my job, I’m on a computer, you know, nearly all day long, and I didn’t want one more thing on the computer or one more thing on my phone. I can imagine, you know, if I was going through a really tough time again, you know, turning to it. Um, luckily, I’m in a bit of a good moment, so I haven’t been using it. \u003c/span>\u003c/p>\n\u003cp>\u003cb>Morgan Sung: \u003c/b>\u003cspan style=\"font-weight: 400\">Yeah. You can unplug now. \u003c/span>\u003c/p>\n\u003cp>\u003cb>Lesley McClurg: \u003c/b>\u003cspan style=\"font-weight: 400\">Exactly. I’m going to enjoy this moment and ride the wave of goodness. \u003c/span>\u003c/p>\n\u003cp>\u003cb>Morgan Sung: \u003c/b>\u003cspan style=\"font-weight: 400\">Thanks again to KQED’s Lesley McClurg: for talking with us about this story. You can check out more of her reporting on healthcare, including this story on AI Therapy at KQED.org. Again, AI therapy tools work best when they’re used in addition to treatment under a licensed professional. But if it’s the only option accessible to you right now, there are AI tools specifically designed for mental health and wellness that might be more useful than the general chatbots like ChatGPT or Claude. For now, let’s close these tabs. \u003c/span>\u003c/p>\n\u003cp>\u003cb>Morgan Sung: \u003c/b>\u003cspan style=\"font-weight: 400\">Close All Tabs is a production of KQED Studios, and is reported and hosted by me, Morgan Sung. \u003c/span>\u003cspan style=\"font-weight: 400\">This episode was produced by Maya Cueva and edited by Chris Egusa, who also composed our theme song and credits music.\u003c/span>\u003c/p>\n\u003cp>\u003cspan style=\"font-weight: 400\">The Close All Tabs team also includes editor Chris Hambrick and audio engineer Brendan Willard. Additional music by APM.\u003c/span>\u003c/p>\n\u003cp>\u003cspan style=\"font-weight: 400\">Audience engagement support from Maha Sanad. 
Jen Chien is our Director of Podcasts, and Ethan Toven-Lindsey is our Editor in Chief.\u003c/span>\u003cb>\u003c/b>\u003c/p>\n\u003cp>\u003cspan style=\"font-weight: 400\">Some members of the KQED podcast team are represented by The Screen Actors Guild, American Federation of Television and Radio Artists. San Francisco Northern California Local.\u003c/span>\u003cb>\u003c/b>\u003c/p>\n\u003cp>\u003cb>\u003c/b>Keyboard sounds were recorded on my purple and pink Dustsilver K-84 wired mechanical keyboard with Gateron Red switches. If you have feedback, or a topic you think we should cover, hit us up at CloseAllTabs@kqed.org. \u003cspan style=\"font-weight: 400\">And if you’re enjoying the show, give us a rating on Apple Podcasts or whatever platform you use. Thanks for listening. \u003c/span>\u003c/p>\n\u003cp> \u003c/p>\n\u003cp>[ad floatright]\u003c/p>\n",
"blocks": [],
"excerpt": "We explore the promise and pitfalls of AI therapy — and what users should know about mental health chatbots.",
"status": "publish",
"parent": 0,
"modified": 1778029544,
"stats": {
"hasAudio": true,
"hasVideo": false,
"hasChartOrMap": false,
"iframeSrcs": [],
"hasGoogleForm": false,
"hasGallery": false,
"hasHearkenModule": false,
"hasPolis": false,
"paragraphCount": 117,
"wordCount": 5840
},
"headData": {
"title": "My Therapist Is a Chatbot (Reload) | KQED",
"description": "What happens when your therapist is… a chatbot? For KQED health reporter Lesley McClurg, it started with a late-night spiral over dating. Instead of texting a friend, she opened ChatGPT and got the kind of calm, reassuring advice she needed. It worked… maybe a little too well.Lesley joins Morgan to dig into the rise of AI therapy, why so many people are turning to chatbots for emotional support, and what they might be risking in the process. These systems promise something traditional mental health care often can’t: instant, affordable, judgment-free access. But there are limits and, sometimes, serious consequences. Note: This episode includes discussions of suicide and mental health conditions. Listener discretion is advised. This episode first aired on April 23rd, 2025. ",
"ogTitle": "",
"ogDescription": "",
"ogImgId": "",
"twTitle": "",
"twDescription": "",
"twImgId": "",
"socialDescription": "What happens when your therapist is… a chatbot? For KQED health reporter Lesley McClurg, it started with a late-night spiral over dating. Instead of texting a friend, she opened ChatGPT and got the kind of calm, reassuring advice she needed. It worked… maybe a little too well.Lesley joins Morgan to dig into the rise of AI therapy, why so many people are turning to chatbots for emotional support, and what they might be risking in the process. These systems promise something traditional mental health care often can’t: instant, affordable, judgment-free access. But there are limits and, sometimes, serious consequences. Note: This episode includes discussions of suicide and mental health conditions. Listener discretion is advised. This episode first aired on April 23rd, 2025. ",
"schema": {
"@context": "https://schema.org",
"@type": "NewsArticle",
"headline": "My Therapist Is a Chatbot (Reload)",
"datePublished": "2026-05-06T03:00:23-07:00",
"dateModified": "2026-05-05T18:05:44-07:00",
"image": "https://cdn.kqed.org/wp-content/uploads/2020/02/KQED-OG-Image@1x.png",
"isAccessibleForFree": "True",
"publisher": {
"@type": "NewsMediaOrganization",
"@id": "https://www.kqed.org/#organization",
"name": "KQED",
"logo": "https://cdn.kqed.org/wp-content/uploads/2020/02/KQED-OG-Image@1x.png",
"url": "https://www.kqed.org",
"sameAs": [
"https://www.facebook.com/KQED",
"https://twitter.com/KQED",
"https://www.instagram.com/kqed/",
"https://www.tiktok.com/@kqedofficial",
"https://www.linkedin.com/company/kqed",
"https://www.youtube.com/channel/UCeC0IOo7i1P_61zVUWbJ4nw"
]
}
}
},
"primaryCategory": {
"termId": 33520,
"slug": "podcast",
"name": "Podcast"
},
"source": "Close All Tabs",
"sourceUrl": "https://www.kqed.org/podcasts/closealltabs",
"audioUrl": "https://traffic.megaphone.fm/KQINC4726760100.mp3?updated=1778027463",
"sticky": false,
"templateType": "standard",
"featuredImageType": "standard",
"excludeFromSiteSearch": "Include",
"articleAge": "0",
"path": "/news/12082478/my-therapist-is-a-chatbot-reload",
"audioTrackLength": null,
"parsedContent": [
{
"type": "contentString",
"content": "\u003cdiv class=\"post-body\">\u003cp>\u003cp>\u003ca href=\"#episode-transcript\">\u003ci>View the full episode transcript.\u003c/i>\u003c/a>\u003c/p>\n\u003cp>\u003cspan style=\"font-weight: 400\">What happens when your therapist is… a chatbot?\u003c/span>\u003c/p>\n\u003cp>\u003cspan style=\"font-weight: 400\">For KQED health reporter Lesley McClurg, it started with a late-night spiral over dating. Instead of texting a friend, she opened ChatGPT and got the kind of calm, reassuring advice she needed. It worked… maybe a little too well.\u003c/span>\u003c/p>\n\u003cp>\u003cspan style=\"font-weight: 400\">Lesley joins Morgan to dig into the rise of AI therapy, why so many people are turning to chatbots for emotional support, and what they might be risking in the process. These systems promise something traditional mental health care often can’t: instant, affordable, judgment-free access. But there are limits and, sometimes, serious consequences. \u003c/span>\u003c/p>\n\u003cp>\u003cb>Note:\u003c/b>\u003cspan style=\"font-weight: 400\"> This episode includes discussions of suicide and mental health conditions. 
Listener discretion is advised.\u003c/span>\u003c/p>\n\u003cp>\u003ci>\u003cspan style=\"font-weight: 400\">This episode first aired on April 23rd, 2025 \u003c/span>\u003c/i>\u003c/p>\n\u003cp>\u003c!-- iframe plugin v.4.3 wordpress.org/plugins/iframe/ -->\u003cbr>\n\u003ciframe loading=\"lazy\" frameborder=\"0\" height=\"200\" scrolling=\"no\" src=\"https://playlist.megaphone.fm?e=KQINC4726760100\" width=\"100%\" class=\"iframe-class\">\u003c/iframe>\u003c/p>\n\u003cp>\u003cstrong>Guest:\u003c/strong>\u003c/p>\n\u003cul>\n\u003cli>\u003ca href=\"https://www.kqed.org/author/lesleymcclurg\">\u003cspan style=\"font-weight: 400\">Lesley McClurg\u003c/span>\u003c/a>\u003cspan style=\"font-weight: 400\">, \u003cem>KQED\u003c/em> health correspondent\u003c/span>\u003c/li>\n\u003c/ul>\n\u003cp>\u003cb>Further Reading/Listening:\u003c/b>\u003c/p>\n\u003cul>\n\u003cli>\u003ca href=\"https://www.kqed.org/science/1996504/ai-replace-therapist-benefits-risks-unsettling-truths\">\u003cspan style=\"font-weight: 400\">Can AI Replace Your Therapist? 
The Benefits, Risks and Unsettling Truths\u003c/span>\u003c/a>\u003cspan style=\"font-weight: 400\"> — Lesley McClurg, \u003c/span>\u003ci>\u003cspan style=\"font-weight: 400\">KQED\u003c/span>\u003c/i>\u003c/li>\n\u003cli>\u003ci>\u003c/i>\u003ca href=\"https://www.npr.org/sections/shots-health-news/2025/04/07/nx-s1-5351312/artificial-intelligence-mental-health-therapy\">\u003cspan style=\"font-weight: 400\">The AI therapist can see you now\u003c/span>\u003c/a>\u003cspan style=\"font-weight: 400\"> — Katia Riddle, \u003c/span>\u003ci>\u003cspan style=\"font-weight: 400\">NPR\u003c/span>\u003c/i>\u003c/li>\n\u003cli>\u003ca href=\"https://spectrum.ieee.org/woebot\">\u003cspan style=\"font-weight: 400\">Woebot, a Mental-Health Chatbot, Tries Out Generative AI\u003c/span>\u003c/a>\u003cspan style=\"font-weight: 400\"> — Casey Sackett, Devin Harper, and Aaron Pavez, \u003c/span>\u003ci>\u003cspan style=\"font-weight: 400\">IEEE Spectrum\u003c/span>\u003c/i>\u003ci>\u003c/i>\u003c/li>\n\u003cli>\u003ca href=\"https://www.kqed.org/news/12057327/ai-prophets-and-spiritual-delusions\">AI Prophets and Spiritual Delusions\u003c/a> — \u003ci>Close All Tabs\u003c/i>\u003c/li>\n\u003cli>\u003ca href=\"https://www.psychologytoday.com/us/blog/urban-survival/202510/new-studies-reveal-mental-health-blindspots-of-ai-chatbots\">New Studies Reveal Mental Health Blindspots of AI Chatbots\u003c/a> — Marlynn Wei, \u003ci>Psychology Today\u003c/i>\u003c/li>\n\u003cli>\u003ca href=\"https://www.npr.org/2026/04/07/nx-s1-5771707/mental-health-care-workforce-artificial-intelligence-ai\">AI in the mental health care workforce is met with fear, pushback — and enthusiasm\u003c/a> — Rhitu Chatterjee, \u003ci>NPR\u003c/i>\u003c/li>\n\u003c/ul>\n\u003cp>Want to give us feedback on the show? 
Shoot us an email at \u003ca href=\"mailto:CloseAllTabs@KQED.org\">CloseAllTabs@KQED.org\u003c/a>\u003c/p>\n\u003cp>\u003cspan style=\"font-weight: 400\">Follow us on\u003c/span>\u003ca href=\"https://www.instagram.com/closealltabspod/\"> \u003cspan style=\"font-weight: 400\">Instagram\u003c/span>\u003c/a>\u003cspan style=\"font-weight: 400\"> and\u003c/span>\u003ca href=\"https://www.tiktok.com/@closealltabs\"> \u003cspan style=\"font-weight: 400\">TikTok\u003c/span>\u003c/a>\u003c/p>\n\u003cp>\u003c/p>\u003c/div>",
"attributes": {
"named": {},
"numeric": []
}
},
{
"type": "component",
"content": "",
"name": "ad",
"attributes": {
"named": {
"label": "fullwidth"
},
"numeric": [
"fullwidth"
]
}
},
{
"type": "contentString",
"content": "\u003cdiv class=\"post-body\">\u003c/div>",
"attributes": {
"named": {},
"numeric": []
}
},
{
"type": "contentString",
"content": "\u003cdiv class=\"post-content post-body\">\u003ch2 id=\"episode-transcript\">Episode Transcript\u003c/h2>\n\u003cp>\u003ci>\u003cspan style=\"font-weight: 400\">This is a computer-generated transcript. While our team has reviewed it, there may be errors.\u003c/span>\u003c/i>\u003c/p>\n\u003cp>\u003cb>Morgan Sung: \u003c/b>\u003cspan style=\"font-weight: 400\">Hey! You’re listening to Close All Tabs, and I’m Morgan Sung. \u003c/span>\u003cspan style=\"font-weight: 400\">May is Mental Health Awareness Month. With the proliferation of AI tools over the last few years, many people have turned to chatbots for companionship, advice … and even therapy. \u003c/span>\u003cspan style=\"font-weight: 400\">It makes sense — healthcare in the US can be completely inaccessible, especially when it comes to mental health treatment. But in some cases, AI chatbots can put very vulnerable users in danger.\u003c/span>\u003cspan style=\"font-weight: 400\"> In light of that, mental health has been at the forefront of conversations about AI use. \u003c/span>\u003cspan style=\"font-weight: 400\">So today, for Mental Health Awareness Month, we’re re-airing an episode that explores that exact topic. A quick heads up: this episode includes discussion of suicide and mental health conditions, which may be distressing for some listeners. If you or someone you know needs support, we’ll have links to resources in the episode description.\u003c/span>\u003c/p>\n\u003cp>\u003cb>Lesley McClurg: \u003c/b>\u003cspan style=\"font-weight: 400\">So, I was going through a divorce and started dating after my divorce and hadn’t dated in many years and came home after a date one night and was just really anxious and kind of disheveled and needed some advice. \u003c/span>\u003c/p>\n\u003cp>\u003cb>Morgan Sung: \u003c/b>\u003cspan style=\"font-weight: 400\">This is KQED health reporter Lesley McClurg. 
\u003c/span>\u003c/p>\n\u003cp>\u003cb>Lesley McClurg: \u003c/b>\u003cspan style=\"font-weight: 400\">It was late at night and I had used ChatGPT for, you know, other things and found it pretty helpful and I thought, what about for this moment in my life? And so I asked Chat whether or not I should reach out to this person that I had just dated because I was feeling like the night hadn’t gone that well.\u003c/span>\u003c/p>\n\u003cp>\u003cb>Morgan Sung: \u003c/b>\u003cspan style=\"font-weight: 400\">It was late at night. She didn’t want to bug a friend about this, and really, she was feeling pretty vulnerable. She didn’t want to be judged. And so, ChatGPT was right there, ready to cheerfully answer her questions. \u003c/span>\u003c/p>\n\u003cp>\u003cb>Lesley McClurg: \u003c/b>\u003cspan style=\"font-weight: 400\">I was surprised that it was so good. I just remember after, you know, a few back and forths, I realized that really I was just nervous, really I just needed to take a deep breath. Basically I had created a big storm in my head. And Chat basically was like, “hey, chill, relax, it could have gone well. There’s another way this could have played out, not the sort of devastating reality that you’re playing out right now. Maybe give it a day or two and then reach out.” And so in that moment, it just sort of helped me take the gas off and come back into myself. \u003c/span>\u003c/p>\n\u003cp>\u003cb>Morgan Sung: \u003c/b>\u003cspan style=\"font-weight: 400\">It was exactly what she needed to hear at the time. \u003c/span>\u003c/p>\n\u003cp>\u003cb>Lesley McClurg: \u003c/b>\u003cspan style=\"font-weight: 400\">I didn’t text the person, which was the right call, and kind of used it as I warmed myself back up into the dating world, and it was really helpful. And so it made me then, as a reporter, start asking, “should I be telling this thing all about my love life? 
Is this a good idea, privacy-wise, et cetera?” And so that’s where it sort of seeded my reporting going forward. \u003c/span>\u003c/p>\n\u003cp>\u003cb>Morgan Sung: \u003c/b>\u003cspan style=\"font-weight: 400\">Lesley isn’t the only one turning to ChatGPT for therapy. If you’ve ever dealt with any health insurance company, you’re probably familiar with the hassle of getting care. And mental health care is especially inaccessible. AI chatbots though, they’re convenient, cost little to nothing to use, and in Lesley’s case, can actually be pretty helpful. But a lot of people are also wary of turning to AI for therapy, can you trust it? What are you risking when you share your most vulnerable thoughts with a chatbot? \u003c/span>\u003c/p>\n\u003cp>\u003cb>Morgan Sung: \u003c/b>\u003cspan style=\"font-weight: 400\">This is Close All Tabs. I’m Morgan Sung, tech journalist and your chronically online friend, here to open as many browser tabs as it takes to help you understand how the digital world affects our real lives. Let’s get into it. \u003c/span>\u003c/p>\n\u003cp>\u003cspan style=\"font-weight: 400\">Access to actual mental health resources has become so limited. Cost and insurance aside, there’s a shortage of licensed human mental health professionals across the country. But can AI therapy really replace actual therapists? Okay, new tab. Does AI therapy work? \u003c/span>\u003c/p>\n\u003cp>\u003cb>Morgan Sung: \u003c/b>\u003cspan style=\"font-weight: 400\">Over the course of your reporting, did you meet anyone who actually used an AI chatbot for therapy? \u003c/span>\u003c/p>\n\u003cp>\u003cb>Lesley McClurg: \u003c/b>\u003cspan style=\"font-weight: 400\">I actually talked to quite a few people who used AI therapy and I went online and read a lot of Reddit threads because this is quite the popular topic. I heard more positive stories than negatives. As a reporter, I wanted to illustrate someone who kind of had a nuanced experience, you know, good and bad. 
\u003c/span>\u003c/p>\n\u003cp>\u003cb>Morgan Sung: \u003c/b>\u003cspan style=\"font-weight: 400\">So, Lesley found a woman named Lilly Payne:. \u003c/span>\u003c/p>\n\u003cp>\u003cb>Lesley McClurg: \u003c/b>\u003cspan style=\"font-weight: 400\">She had kind of the ideal story to illustrate that, yes, it helped her, but it wasn’t ideal. And so that was sort of like the character that I ended up, you know, focusing on. \u003c/span>\u003c/p>\n\u003cp>\u003cb>Morgan Sung: \u003c/b>\u003cspan style=\"font-weight: 400\">In your story, you mentioned that Lilly had turned to AI therapy um during the COVID lockdowns, which were a terrible time for a lot of us. But Lilly wasn’t just experiencing, you know, anxiety and depression and loneliness. Her situation was a little more complicated, right? Can you talk about that? \u003c/span>\u003c/p>\n\u003cp>\u003cb>Lesley McClurg: \u003c/b>\u003cspan style=\"font-weight: 400\">Yeah, I mean in her words, her life basically fell apart. She graduated from college, she had moved to New York City to pursue an arts career, was very excited. And if we can remember, you know, New York was sort of the epicenter of the early days of COVID. It was really bad. Lockdown was really scary and the hospitals were overflowing and it was not a good scene. And so she left her arts career, abandoned her dreams and moved back home, which was pretty painful, to her parents’ home in Kentucky. And she is sort of tucked away, and it just felt like a big failure. And she was really struggling with like, what’s next for my life? Where do I go from here? \u003c/span>\u003c/p>\n\u003cp>\u003cb>Lilly Payne: \u003c/b>\u003cspan style=\"font-weight: 400\">It was such a lonely time for so many people. \u003c/span>\u003c/p>\n\u003cp>\u003cb>Morgan Sung: \u003c/b>\u003cspan style=\"font-weight: 400\">This is Lilly. 
\u003c/span>\u003c/p>\n\u003cp>\u003cb>Lilly Payne: \u003c/b>\u003cspan style=\"font-weight: 400\">I was not at a breaking point, but I wasn’t doing awesome. So I was like, “the more help, the better.” \u003c/span>\u003c/p>\n\u003cp>\u003cb>Lesley McClurg: \u003c/b>\u003cspan style=\"font-weight: 400\">And so in all of that anxiety, she, you know, initially reached out and leaned on a lot of friends, but eventually she felt like she’d kind of worn those supports thin. And so she read about Woebot, this AI therapy platform in a health newsletter. \u003c/span>\u003c/p>\n\u003cp>\u003cb>Lilly Payne: \u003c/b>\u003cspan style=\"font-weight: 400\">So, I gave it a shot because I was like, why not? Everyone’s cooped up in their house. I will talk to this robot. \u003c/span>\u003c/p>\n\u003cp>\u003cb>Lesley McClurg: \u003c/b>\u003cspan style=\"font-weight: 400\">Initially it was really helpful. It did help her calm herself. I think she said she, you know, even just having it in her pocket helped her feel more in control in her life. I think she relied on it quite a bit in those early days to kind of find her ground again and be able to focus on, you know, re-imagining a new life from there while she was back at home with her parents in Kentucky. \u003c/span>\u003c/p>\n\u003cp>\u003cb>Morgan Sung: \u003c/b>\u003cspan style=\"font-weight: 400\">It’s worth noting that Woebot is a therapy-specific AI chatbot, and it doesn’t use generative AI to respond to users the way that other tools like ChatGPT, or Claude, or DeepSeek do. This means that its interactions with users are a bit more predictable. It’s also engineered to respond the way that a therapist might. So instead of immediately jumping into offering advice, Woebot asks specific questions to encourage users to reflect and do the inner work themselves. \u003c/span>\u003c/p>\n\u003cp>\u003cb>Lesley McClurg: \u003c/b>\u003cspan style=\"font-weight: 400\">Well, it was designed by a psychologist. 
And so, you know, from that perspective, it it really is designed to focus on your mental health. The goal of Woebot is, you know, as a mental health tool, as a wellness tool, I think is how they market themselves. \u003c/span>\u003c/p>\n\u003cp>\u003cb>Morgan Sung: \u003c/b>\u003cspan style=\"font-weight: 400\">Woebot is designed to use a set of techniques called cognitive behavioral therapy. \u003c/span>\u003c/p>\n\u003cp>\u003cb>Lesley McClurg: \u003c/b>\u003cspan style=\"font-weight: 400\">You know, cognitive behavioral therapy helps you reframe your negative thoughts using specific exercises. And, you know, I think as any CBT, which is what it’s acronym is, it feels a little forced, but she did say it did help her reframe those negative thoughts and that she was able to think more more positively. \u003c/span>\u003c/p>\n\u003cp>\u003cb>Morgan Sung: \u003c/b>\u003cspan style=\"font-weight: 400\">Yeah. Can you talk about uh Lilly’s uh other diagnosis that maybe complicated this form of treatment? \u003c/span>\u003c/p>\n\u003cp>\u003cb>Lesley McClurg: \u003c/b>\u003cspan style=\"font-weight: 400\">She has obsessive-compulsive disorder, and sometimes that makes her fixate on worst-case scenarios. \u003c/span>\u003c/p>\n\u003cp>\u003cb>Lilly Payne: \u003c/b>\u003cspan style=\"font-weight: 400\">Most of the time when people think about OCD they think of, just the very cliche like, “oh, you can’t stop washing your hands, you’re afraid of germs.” While that is a very real subtype that people experience, typically OCD like manifests in really taboo intrusive thoughts, and then the physical compulsions stem from trying to keep those themes away. And so, logically, you can know that, like, this doesn’t make sense, it’s not actually happening, but it just, it, it’s not just in your head, like physically it feels so real. \u003c/span>\u003c/p>\n\u003cp>\u003cb>Morgan Sung: \u003c/b>\u003cspan style=\"font-weight: 400\">Lilly is also diagnosed with anxiety and depression. 
\u003c/span>\u003c/p>\n\u003cp>\u003cb>Lesley McClurg: \u003c/b>\u003cspan style=\"font-weight: 400\">A symptom of depression is suicide ideation eventually, right? So she fixated on the idea that eventually because of her depression, that she may think about killing herself. \u003c/span>\u003c/p>\n\u003cp>\u003cb>Lilly Payne: \u003c/b>\u003cspan style=\"font-weight: 400\">My brain would be like, “Oh, you’ve struggled with depression in the past. There’s no saying that one day you won’t want to go through with suicide.”\u003c/span>\u003c/p>\n\u003cp>\u003cb>Lesley McClurg: \u003c/b>\u003cspan style=\"font-weight: 400\">And so she mentioned that she was worried about suicide in a session with Woebot. And Woebot came back and had a crisis alert and said, “hey, you better call the suicide hotline.” And she said, “no, no no, wait a second.”\u003c/span>\u003c/p>\n\u003cp>\u003cb>Lilly Payne: \u003c/b>\u003cspan style=\"font-weight: 400\">I’m not experiencing suicidal inclinations, I’m just terrified that I will. \u003c/span>\u003c/p>\n\u003cp>\u003cb>Lesley McClurg: \u003c/b>\u003cspan style=\"font-weight: 400\">And luckily she knew that, she understood her disorder enough to know that nuance and to know what was happening in her brain because she had done so much previous therapy. But she said, you know, if she hadn’t really understood her disease, having that crisis alert come up may have even added more stress. \u003c/span>\u003c/p>\n\u003cp>\u003cb>Lilly Payne: \u003c/b>\u003cspan style=\"font-weight: 400\">I would have freaked out and been like, “oh my gosh, this this thing that is supposed to have this mental health knowledge thinks that I am suicidal. I must be suicidal, I must be a danger to myself.” \u003c/span>\u003c/p>\n\u003cp>\u003cb>Lesley McClurg: \u003c/b>\u003cspan style=\"font-weight: 400\">So, you know, in defense of Woebot, they came back and said, “hey, we’re not, you know, specifically targeting or for people who have OCD. 
We really are just a wellness tool.” But her story illustrates where AI doesn’t necessarily have the nuance, the understanding — that a human, like a human therapist would have picked up on that. They would have understood that she had OCD and really understood the nuances of that, whereas in this case, Woebot didn’t. \u003c/span>\u003c/p>\n\u003cp>\u003cb>Morgan Sung: \u003c/b>\u003cspan style=\"font-weight: 400\">Right. It sounds like Woebot was inadvertently validating this intrusive thought that she was having because she has OCD. And when you’re really depressed or anxious, it might be helpful for your feelings to be validated like that. But how does that compare to the recommended treatment for OCD? \u003c/span>\u003c/p>\n\u003cp>\u003cb>Lesley McClurg: \u003c/b>\u003cspan style=\"font-weight: 400\">I mean the recommended treatment for OCD is generally exposure therapy. So you expose yourself to whatever you’re scared of. And so in this case, a therapist would work with her in terms of exposing herself to those ideas, probably walk her through, you know, reality, et cetera, in a way that allows her to lean into her fears so that they’re not as scary and sort of wound up and keep going. And sort of overtake her. Whereas you, like a therapist wouldn’t stand up with a red flag and say, “Oh my God, you really are suicidal. Therefore you should call a hotline.” Right? Which is basically what Woebot did. Yeah.\u003c/span>\u003c/p>\n\u003cp>\u003cb>Morgan Sung: \u003c/b>\u003cspan style=\"font-weight: 400\">Lilly’s case is just one example of the limits of AI therapy. Responding with a crisis alert wasn’t helpful for her specific needs, but it’s probably good that Woebot even has those guardrails in place. But what happens when AI chatbots go off script? How bad can it get? We’ll get into that when we come back. \u003c/span>\u003c/p>\n\u003cp>\u003cspan style=\"font-weight: 400\">New tab. AI therapy … worst case scenarios. 
\u003c/span>\u003c/p>\n\u003cp>\u003cspan style=\"font-weight: 400\">So Woebot can’t necessarily respond with the nuance of an actual human therapist. But it seems like it wasn’t giving Lilly bad advice. Um but let’s talk about examples of AI therapy doing the exact opposite of what it’s supposed to do. What happened with the National Eating Disorder Association hotline? \u003c/span>\u003c/p>\n\u003cp>\u003cb>Lesley McClurg: \u003c/b>\u003cspan style=\"font-weight: 400\">Yeah, that didn’t play out very well. They created a bot named Tessa and some of the users found that Tessa was giving them dieting advice. So these are folks \u003c/span>\u003c/p>\n\u003cp>\u003cb>Morgan Sung: \u003c/b>\u003cspan style=\"font-weight: 400\">Oh god. \u003c/span>\u003c/p>\n\u003cp>\u003cb>Lesley McClurg: \u003c/b>\u003cspan style=\"font-weight: 400\">Who have, you know, anorexia, bulimia, and somehow Tessa’s wires got crossed and people were getting the exact advice that would be really dangerous for their eating disorders. \u003c/span>\u003c/p>\n\u003cp>\u003cb>Sharon Maxwell: \u003c/b>\u003cspan style=\"font-weight: 400\">The recommendations that Tessa gave me was that I could lose one to two pounds per week, that I should eat no more than 2,000 calories in a day, that I should have a calorie deficit of 500 to 1,000 calories per day. All of which might sound benign to the general listener, however, to an individual with an eating disorder, the focus of weight loss really fuels the eating disorder. \u003c/span>\u003c/p>\n\u003cp>\u003cb>Morgan Sung: \u003c/b>\u003cspan style=\"font-weight: 400\">That was Sharon Maxwell, an eating disorder recovery educator, speaking to NPR about her experience with Tessa. 
\u003c/span>\u003c/p>\n\u003cp>\u003cb>Lesley McClurg: \u003c/b>\u003cspan style=\"font-weight: 400\">So, NEDA, the National Eating Disorder Association, you know, pulled Tessa down and said, “this isn’t working very well.”\u003c/span>\u003c/p>\n\u003cp>\u003cb>Morgan Sung: \u003c/b>\u003cspan style=\"font-weight: 400\">And it sounds like they just didn’t have that kind of guardrail in place. Like they didn’t anticipate that. Um, so even if Lilly didn’t really need Woebot to immediately jump into crisis mode, at least it had that guardrail to say, like, “hey, crisis.” But in the past, other AI chatbots have gotten into serious trouble for not responding to users’ red flags and just validating their responses. \u003c/span>\u003c/p>\n\u003cp>\u003cspan style=\"font-weight: 400\">And that happened in the case of Character AI, this AI app that lets users personalize an AI companion based on fictional characters, celebrities, historical figures, all that. Until a recent lawsuit, Character AI did not have any safety measures or disclaimers warning users that they weren’t talking to a real person. What led to this lawsuit? \u003c/span>\u003c/p>\n\u003cp>\u003cb>Lesley McClurg: \u003c/b>\u003cspan style=\"font-weight: 400\">Yeah, there was a 14-year-old who grew really attached to his character that he had created. Like you said, Character AI lets you create a character and then interact with that character. And, you know, not surprisingly, kind of like I did in my first experience with ChatGPT, it feels so good that you develop a little bit of an emotional connection. \u003c/span>\u003c/p>\n\u003cp>\u003cspan style=\"font-weight: 400\">And so this 14-year-old did that over the course of several months. And then he started opening up about some of the distress that he was feeling. And the character, instead of steering, you know, this 14-year-old towards help, unfortunately the bot allegedly reinforced some suicidal thoughts and eventually the boy ended up taking his life. 
And so the lawsuit, \u003c/span>\u003c/p>\n\u003cp>\u003cb>Morgan Sung: \u003c/b>\u003cspan style=\"font-weight: 400\">That’s terrible. \u003c/span>\u003c/p>\n\u003cp>\u003cb>Lesley McClurg: \u003c/b>\u003cspan style=\"font-weight: 400\">Exactly, it was really kind of horrific and it’s not the only one like this. There’s only a handful at this point, but it really is raising the red flag that these very empathetic responses are sort of like, you know parroting back, which is, again, what some AI does. Uh it can play out really, really poorly. \u003c/span>\u003c/p>\n\u003cp>\u003cstrong>Morgan Sung:\u003c/strong> In January 2026, Character.AI agreed to settle multiple lawsuits that alleged that the chatbot contributed to mental health issues among teenagers. Other companies have faced similar lawsuits, after several users died by suicide, allegedly at the encouragement of chatbots.\u003c/p>\n\u003cp>\u003cspan style=\"font-weight: 400\">So what happened with the eating disorder hotline and Character AI, those are pretty extreme cases. Will most people actually experience those worst case scenarios? In your research, did you find anything about that? Or is it just like, are these just edge cases? \u003c/span>\u003c/p>\n\u003cp>\u003cb>Lesley McClurg: \u003c/b>\u003cspan style=\"font-weight: 400\">I mean we don’t have numbers yet. I think it’s really early in the arc of this technology. I think the experts are most worried about platforms that are like Character AI, where you are building a relationship with a character. In their defense, they’re not built as mental health tools, right? These are not marketing themselves as mental health tools. They are, you know, marketing themselves as, “hey, here, we’re going to give you a friend.” \u003c/span>\u003c/p>\n\u003cp>\u003cspan style=\"font-weight: 400\">Yet, you know, like a friend, like you and I probably do with our friends, we lean on our friends. We talk to our friends. 
We build emotional connections with our friends. We trust our friends for the right advice, right? And these are robots. So that relationship is not uh, you know, built on human connection. And like we can see it can go wrong. \u003c/span>\u003c/p>\n\u003cp>\u003cb>Morgan Sung: \u003c/b>\u003cspan style=\"font-weight: 400\">Another concern that I have, you know, as a tech reporter is uh privacy. ChatGPT, for example, isn’t HIPAA compliant. Could you explain what HIPAA is and why it’s necessary with medical information? \u003c/span>\u003c/p>\n\u003cp>\u003cb>Lesley McClurg: \u003c/b>\u003cspan style=\"font-weight: 400\">Yeah, I mean HIPAA is the regulation that keeps all of our data safe. So when you go to the doctor, a doctor is required to keep all of your medical information, you know, totally private. It’s not going to be given anywhere. It’s not going to leak away. That is the privacy regulations. Now, some of these platforms, you know, for example, like Woebot, uh Rosebud is one, which is a platform that’s more like a journaling service. Uh you know, they say they’re HIPAA compliant, but there’s no one regulating them. It’s not like the American Medical Association is regulating them. \u003c/span>\u003c/p>\n\u003cp>\u003cspan style=\"font-weight: 400\">So, that data, you don’t really know where it’s going. You’re trusting these companies who are profit driven. You know, I mean, hopefully Woebot and Rosebud, you know, are following their own promises to their consumers. But there might be other companies that, you know, definitely ChatGPT is not, you know, promising that they’re HIPAA compliant. And, you know, that information is being used, is being put out there to retrain the model. And so, you know, hopefully they’re not gonna sell your data to advertisers. 
\u003c/span>\u003c/p>\n\u003cp>\u003cspan style=\"font-weight: 400\">You know, also, I mean, the kind of a worst-case scenario, this fortunately hasn’t happened yet, but, you know, what if your mental health information gets out there, an insurance company gets wind of that, and your premiums start going up because they know that you’re struggling with something. \u003c/span>\u003c/p>\n\u003cp>\u003cb>Morgan Sung: \u003c/b>\u003cspan style=\"font-weight: 400\">Oh wow. \u003c/span>\u003c/p>\n\u003cp>\u003cb>Lesley McClurg: \u003c/b>\u003cspan style=\"font-weight: 400\">So, you know, again, that hasn’t happened yet. Those are sort of like the worst-case scenarios. \u003c/span>\u003c/p>\n\u003cp>\u003cb>Morgan Sung: \u003c/b>\u003cspan style=\"font-weight: 400\">But again, worst-case scenarios. Right. \u003c/span>\u003c/p>\n\u003cp>\u003cb>Lesley McClurg: \u003c/b>\u003cspan style=\"font-weight: 400\">Exactly. \u003c/span>\u003c/p>\n\u003cp>\u003cb>Morgan Sung: \u003c/b>\u003cspan style=\"font-weight: 400\">Yeah. Obviously, the priority of pretty much any for-profit company is to monetize. But, do AI companies have any incentive to improve as more people turn to their products for therapy, even if they aren’t necessarily mental health specific chatbots? Um, you know, are there better safety measures, more transparency about data collection, especially given the Character AI lawsuit? \u003c/span>\u003c/p>\n\u003cp>\u003cb>Lesley McClurg: \u003c/b>\u003cspan style=\"font-weight: 400\">I think they have that incentive. They also have the incentive to keep you hooked. So I think that’s the sort of like fine line. We’ve seen that with all social media, right? They’re getting a lot better at keeping our attention. AI companies have the same needs and incentives to keep people coming back. 
And so, you know, I think it’s gonna be a gray area and it’s going to be, unfortunately, like the social media companies, it’s gonna be really up to the creators of these products on whether or not they’re gonna have a really ethical orientation. \u003c/span>\u003c/p>\n\u003cp>\u003cb>Morgan Sung: \u003c/b>\u003cspan style=\"font-weight: 400\">Despite all of these issues, therapy is so inaccessible that unfortunately, AI chatbots might feel like the only immediate tool that people have when seeking treatment. How did we get here? Let’s open one last tab. The mental healthcare crisis. \u003c/span>\u003c/p>\n\u003cp>\u003cb>Morgan Sung: \u003c/b>\u003cspan style=\"font-weight: 400\">You had mentioned earlier the state of mental health care. Why is it so hard to see a therapist? \u003c/span>\u003c/p>\n\u003cp>\u003cb>Lesley McClurg: \u003c/b>\u003cspan style=\"font-weight: 400\">Yeah, I mean, the demand for mental health services is really at an all-time high, and it’s surged even more, you know, since the pandemic began and continues to do so. I think there’s something like one in five Americans have some kind of a mental health issue, and yet they have a significant barrier to getting to a therapist. You know, I think it’s 55% of counties, people don’t have access to a psychotherapist or a social worker or a psychologist. They’re just aren’t any in that area. \u003c/span>\u003c/p>\n\u003cp>\u003cspan style=\"font-weight: 400\">And so, you know, I think because of this issue that these sort of mental health deserts, AI is a kind of natural fill-in. You know, It’s available 24-7. You don’t need insurance to get there. You don’t have a high deductible. Uh, you don’t have to prove to anyone that, you know, that you have a mental health condition. \u003c/span>\u003c/p>\n\u003cp>\u003cb>Morgan Sung: \u003c/b>\u003cspan style=\"font-weight: 400\">Yeah. 
\u003c/span>\u003c/p>\n\u003cp>\u003cb>Lesley McClurg: \u003c/b>\u003cspan style=\"font-weight: 400\">You don’t get accepted. Uh, so it’s easy and accessible. And I think that it will mean that more and more people are going to use this and hopefully, they’ll be well-informed consumers. \u003c/span>\u003c/p>\n\u003cp>\u003cb>Morgan Sung: \u003c/b>\u003cspan style=\"font-weight: 400\">Yeah. You know, given the shortage of providers, and like you mentioned, insurance issues. Um, since the pandemic started, telehealth therapy has become pretty popular. But I’ve seen a lot of complaints about these kind of quick, one-size-fits-all mental health care platforms like BetterHelp, which matches users with Licensed Therapist or Cerebral, which sets users up with a psychiatrist that can prescribe medications like antidepressants or ADHD meds. And both of these services were created to, kind of fill this void that you’re talking about, but at the same time, they’re kind of plagued with their own issues. It seems like making therapy quick and accessible isn’t always as easy as it seems. What do you think? \u003c/span>\u003c/p>\n\u003cp>\u003cb>Lesley McClurg: \u003c/b>\u003cspan style=\"font-weight: 400\">I think there’s absolutely a role for telehealth. I think there’s absolutely a role for AI therapy. I think anyone would probably say that having a really heartfelt connection with a therapist in an office, live human, feels different than if you are talking to a screen. And the emotional repair that can happen in that session with a live human I think is different and potentially more profound than with a robot. That might change over time. You know, I don’t know how good these things are going to get. They already feel a little bit too good for my own comfort. \u003c/span>\u003c/p>\n\u003cp>\u003cb>Morgan Sung: \u003c/b>\u003cspan style=\"font-weight: 400\">Yeah. 
\u003c/span>\u003c/p>\n\u003cp>\u003cb>Lesley McClurg: \u003c/b>\u003cspan style=\"font-weight: 400\">Uh, but they might, they might get even better. You know, same thing, I think the telehealth model at this point is pretty early. I think that they are still refining how well those things work. I think it’s similar with AI therapy. And, you know, I think the, the tricky thing here as well with any of these technological solutions is that we are also living in a pretty isolated way in our lives right now. If you’re taking even like your therapy to a computer, that’s one less human that you’re interacting with. And maybe you’re, you know, mental health issues are because you’re dealing with isolation, with estrangement, with disconnection. Those feelings might even become more escalated if you’re, you know, using telehealth or using AI therapy. \u003c/span>\u003c/p>\n\u003cp>\u003cspan style=\"font-weight: 400\">I saw both responses and reflections when I was reading these Reddit threads, you know, from people who were in rural places. I knew that they were feeling more isolated using an AI therapy and others who said, “you know, it was a godsend because I was so alone, at least someone was listening to me. ” \u003c/span>\u003c/p>\n\u003cp>\u003cb>Morgan Sung: \u003c/b>\u003cspan style=\"font-weight: 400\">For your story on AI therapy, you talked to a bunch of psychologists and, you know, real-life human psychologists, um and, you know, someone from the American Psychological Association. Are human therapists concerned about being replaced by AI? \u003c/span>\u003c/p>\n\u003cp>\u003cb>Lesley McClurg: \u003c/b>\u003cspan style=\"font-weight: 400\">I don’t hear that from them yet. Number one, they’re still really in high demand. \u003c/span>\u003c/p>\n\u003cp>\u003cb>Lesley McClurg: \u003c/b>\u003cspan style=\"font-weight: 400\">So I don’t think they’re feeling that crunch yet. 
\u003c/span>\u003c/p>\n\u003cp>\u003cb>Morgan Sung: \u003c/b>\u003cspan style=\"font-weight: 400\">There’s still a shortage, right? \u003c/span>\u003c/p>\n\u003cp>\u003cb>Lesley McClurg: \u003c/b>\u003cspan style=\"font-weight: 400\">There’s still a huge shortage. And I think, they’re, they’re fairly confident that what they offer is different than what AI therapy offers. And, you know, they can pick up on subtle cues that AI, you know, can’t, say like body language or, you know, pace of speech. These things can reveal a lot about our mental health state, and AI can’t pick up on that stuff. So, and in the deeper bonds, the deeper attachment work that you might do in therapy, I think therapists are quite confident that they’re still better at that. Uh, so in this moment, I would say they’re not, they’re not especially worried. \u003c/span>\u003c/p>\n\u003cp>\u003cb>Morgan Sung: \u003c/b>\u003cspan style=\"font-weight: 400\">We’ve talked about the downsides of AI therapy pretty extensively. Um but, you had mentioned like that they can kind of be a tool in a bigger treatment plan while also seeing a real therapist. If someone is going to use AI therapy, how should they approach it? \u003c/span>\u003c/p>\n\u003cp>\u003cb>Lesley McClurg: \u003c/b>\u003cspan style=\"font-weight: 400\">Yeah. Yeah. I think, that’s the message I hope comes across in my reporting, is that, you know, there are these worst case scenarios. Again, I think that the consumer should be educated on how their data is going to be used and understand how the company operates so that they’re not sharing uh really vulnerable information. But I think as a sort of, you know, addition to your yoga, your meditation, your, uh, you know, walks in nature, I think AI can really be a self-regulation tool. And I think it can be used quite well. 
\u003c/span>\u003c/p>\n\u003cp>\u003cspan style=\"font-weight: 400\">You know, I talked to one company, Rosebud, which is a kind of journaling platform, which it asks you questions to kind of inspire you to express whatever’s going on and help you reflect. And it can follow a thread. So if you mentioned something two weeks ago about your relationship and what was going wrong, it will check in with you about what is happening and help you make sense of that. And I was on it. You know, I’m not a huge pen and paper person. You know, I don’t write anything anymore, so my arm hurts really quickly. And so, I enjoy, you know, I like just would pick up my phone and I would journal just, you know, talking to it and it would ask me questions and it felt, you know, fairly similar to a conversation with a friend. And I would always feel quite a bit better afterwards. \u003c/span>\u003c/p>\n\u003cp>\u003cspan style=\"font-weight: 400\">So, in that sense, I think it can be quite helpful because, you know, maybe I’m in therapy once a week, but I’m having a panic attack on Monday night and my, you know, appointment is not until Thursday. I think in that sense, you know, it’s four o’clock in the morning. I can’t call a human therapist no matter what, even if I do have one. You know, to sit down and have the opportunity to have something that’s engaging me, um, I think can be really helpful. \u003c/span>\u003c/p>\n\u003cp>\u003cb>Morgan Sung: \u003c/b>\u003cspan style=\"font-weight: 400\">I’m really curious, since you started reporting on this story, have you used ChatGPT, uh, not necessarily as a therapist, but you know, as this kind of mental health tool that you’re talking about since? \u003c/span>\u003c/p>\n\u003cp>\u003cb>Lesley McClurg: \u003c/b>\u003cspan style=\"font-weight: 400\">I wish I had the positive spin to be like, “yes, I’m relying on it all the time.” You know, I didn’t and I don’t. Um, I felt a little bit like one more thing to do. 
\u003c/span>\u003c/p>\n\u003cp>\u003cb>Morgan Sung: \u003c/b>\u003cspan style=\"font-weight: 400\">Right. \u003c/span>\u003c/p>\n\u003cp>\u003cb>Lesley McClurg: \u003c/b>\u003cspan style=\"font-weight: 400\">And I felt similarly, you know, we talked about Lilly at the beginning of the story, and the reason that she stopped using Woebot was not because, you know, it had the crisis alert or it sort of like poorly dealt with her OCD, she got tired of being on her phone. She was like, “I didn’t want to be on my phone anymore. I wanted to talk to someone.” \u003c/span>\u003c/p>\n\u003cp>\u003cb>Morgan Sung: \u003c/b>\u003cspan style=\"font-weight: 400\">Yeah. \u003c/span>\u003c/p>\n\u003cp>\u003cb>Lesley McClurg: \u003c/b>\u003cspan style=\"font-weight: 400\">And I feel that. You know, that was, that was, kind of my reasoning, you know, because of my job, I’m on a computer, you know, nearly all day long, and I didn’t want one more thing on the computer or one more thing on my phone. I can imagine, you know, if I was going through a really tough time again, you know, turning to it. Um, luckily, I’m in a bit of a good moment, so I haven’t been using it. \u003c/span>\u003c/p>\n\u003cp>\u003cb>Morgan Sung: \u003c/b>\u003cspan style=\"font-weight: 400\">Yeah. You can unplug now. \u003c/span>\u003c/p>\n\u003cp>\u003cb>Lesley McClurg: \u003c/b>\u003cspan style=\"font-weight: 400\">Exactly. I’m going to enjoy this moment and ride the wave of goodness. \u003c/span>\u003c/p>\n\u003cp>\u003cb>Morgan Sung: \u003c/b>\u003cspan style=\"font-weight: 400\">Thanks again to KQED’s Lesley McClurg for talking with us about this story. You can check out more of her reporting on healthcare, including this story on AI Therapy at KQED.org. Again, AI therapy tools work best when they’re used in addition to treatment under a licensed professional. 
But if it’s the only option accessible to you right now, there are AI tools specifically designed for mental health and wellness that might be more useful than the general chatbots like ChatGPT or Claude. For now, let’s close these tabs. \u003c/span>\u003c/p>\n\u003cp>\u003cb>Morgan Sung: \u003c/b>\u003cspan style=\"font-weight: 400\">Close All Tabs is a production of KQED Studios, and is reported and hosted by me, Morgan Sung. \u003c/span>\u003cspan style=\"font-weight: 400\">This episode was produced by Maya Cueva and edited by Chris Egusa, who also composed our theme song and credits music.\u003c/span>\u003c/p>\n\u003cp>\u003cspan style=\"font-weight: 400\">The Close All Tabs team also includes editor Chris Hambrick and audio engineer Brendan Willard. Additional music by APM.\u003c/span>\u003c/p>\n\u003cp>\u003cspan style=\"font-weight: 400\">Audience engagement support from Maha Sanad. Jen Chien is our Director of Podcasts, and Ethan Toven-Lindsey is our Editor in Chief.\u003c/span>\u003cb>\u003c/b>\u003c/p>\n\u003cp>\u003cspan style=\"font-weight: 400\">Some members of the KQED podcast team are represented by The Screen Actors Guild, American Federation of Television and Radio Artists. San Francisco Northern California Local.\u003c/span>\u003cb>\u003c/b>\u003c/p>\n\u003cp>\u003cb>\u003c/b>Keyboard sounds were recorded on my purple and pink Dustsilver K-84 wired mechanical keyboard with Gateron Red switches. If you have feedback, or a topic you think we should cover, hit us up at CloseAllTabs@kqed.org. \u003cspan style=\"font-weight: 400\">And if you’re enjoying the show, give us a rating on Apple Podcasts or whatever platform you use. Thanks for listening. \u003c/span>\u003c/p>\n\u003cp> \u003c/p>\n\u003cp>\u003c/p>\n\u003c/div>"
}
],
"link": "/news/12082478/my-therapist-is-a-chatbot-reload",
"authors": [
"11944",
"11869",
"11832",
"11943"
],
"programs": [
"news_35082"
],
"categories": [
"news_33520"
],
"tags": [
"news_25184",
"news_36279",
"news_22973",
"news_3137",
"news_34646",
"news_2109",
"news_1631",
"news_20782"
],
"featImg": "news_12082483",
"label": "source_news_12082478"
},
"news_12081916": {
"type": "posts",
"id": "news_12081916",
"meta": {
"index": "posts_1716263798",
"site": "news",
"id": "12081916",
"score": null,
"sort": [
1777591777000
]
},
"guestAuthors": [],
"slug": "are-elon-musk-and-openai-fighting-an-ai-arms-race-sam-altmans-lawyers-think-so",
"title": "Are Elon Musk and OpenAI Fighting an AI Arms Race? Sam Altman’s Lawyers Think So",
"publishDate": 1777591777,
"format": "standard",
"headTitle": "Are Elon Musk and OpenAI Fighting an AI Arms Race? Sam Altman’s Lawyers Think So | KQED",
"labelTerm": {
"site": "news"
},
"content": "\u003cp>As Elon Musk’s dayslong testimony in his \u003ca href=\"https://www.kqed.org/news/12081290/how-to-unscramble-an-omelet-in-silicon-valley-the-musk-v-altman-trial-that-will-try\">case against OpenAI co-founder Sam Altman\u003c/a> came to a close Thursday, defense attorneys aimed to paint the world’s richest man as intent on dominating artificial intelligence — not on protecting the world from it.\u003c/p>\n\u003cp>Under cross-examination in an Oakland court, attorneys for Altman and Microsoft, the company’s largest financial backer and which until this week held the exclusive rights to license and sell its technology, held Musk’s feet to the fire about a number of business moves he’s made — both within and outside of OpenAI — that might give jurors pause about whether he operated so differently from his former colleagues in the race to dominate the field.\u003c/p>\n\u003cp>During hours of testimony, Musk has told the court that he cofounded the nonprofit OpenAI with Altman and OpenAI President Greg Brockman\u003ca href=\"https://www.kqed.org/news/12081603/elon-musk-takes-aim-at-openai-as-trial-begins-its-not-ok-to-steal-a-charity\"> in 2015 altruistically\u003c/a>, fearing the dangers of AI and wanting to ensure that the technology was developed in a safe and open-source way. 
He brought the suit, he said, after deciding that his co-founders \u003ca href=\"https://www.kqed.org/news/12081798/elon-musk-says-sam-altman-tricked-him-into-funding-openai\">had betrayed that intention\u003c/a> — expanding the company into a tech behemoth valued at $852 billion today.\u003c/p>\n\u003cp>[ad fullwidth]\u003c/p>\n\u003cp>But Microsoft attorney Russell Cohen seemed to point to a different motivation: a desire to beat OpenAI and win the AI race.\u003c/p>\n\u003cp>“You didn’t sue Microsoft [and OpenAI] until November 2024, correct?” Cohen said.\u003c/p>\n\u003cp>“Yes,” Musk said.\u003c/p>\n\u003cp>“And that is after you formed your own AI company, xAI, correct?” Cohen said.\u003c/p>\n\u003cp>“Yes,” Musk said.\u003c/p>\n\u003cfigure id=\"attachment_12081637\" class=\"wp-caption aligncenter\" style=\"max-width: 2000px\">\u003cimg loading=\"lazy\" decoding=\"async\" class=\"size-full wp-image-12081637\" src=\"https://cdn.kqed.org/wp-content/uploads/sites/10/2026/04/260428-MUSK-ALTMAN-VB-01-KQED.jpg\" alt=\"\" width=\"2000\" height=\"1125\" srcset=\"https://cdn.kqed.org/wp-content/uploads/sites/10/2026/04/260428-MUSK-ALTMAN-VB-01-KQED.jpg 2000w, https://cdn.kqed.org/wp-content/uploads/sites/10/2026/04/260428-MUSK-ALTMAN-VB-01-KQED-160x90.jpg 160w, https://cdn.kqed.org/wp-content/uploads/sites/10/2026/04/260428-MUSK-ALTMAN-VB-01-KQED-1536x864.jpg 1536w, https://cdn.kqed.org/wp-content/uploads/sites/10/2026/04/260428-MUSK-ALTMAN-VB-01-KQED-1200x675.jpg 1200w\" sizes=\"auto, (max-width: 2000px) 100vw, 2000px\">\u003cfigcaption class=\"wp-caption-text\">OpenAI’s lead counsel William Savitt presents opening statements in the trial in which Elon Musk claims that Sam Altman and OpenAI abandoned their founding promise to develop AI for the benefit of humanity, rather than solely for profit in Oakland on April 28, 2026. 
\u003ccite>(Vicki Behringer for KQED)\u003c/cite>\u003c/figcaption>\u003c/figure>\n\u003cp>The questions came after William Savitt, Altman’s attorney, directed the jury’s attention to a bombshell message Musk sent to Mark Zuckerberg in February 2025, asking whether the Meta CEO would be “open to the idea of bidding on the OpenAI IP,” or intellectual property, with Musk and others.\u003c/p>\n\u003cp>The jury also learned that xAI had partially “distilled,” that is, derived technology from OpenAI’s own models, which violates OpenAI’s terms of service.\u003c/p>\n\u003cp>The pointed inquiries on Thursday came after Musk’s testimony started to bring the events of how OpenAI launched its first for-profit subsidiary into focus. In 2017, executives including Altman, Musk, Brockman and Ilya Sutskever, a top computer scientist at the company since its founding, launched discussions about creating a for-profit subsidiary.\u003c/p>\n\u003cp>It would be, they said, a way to bring in additional funding and keep at the cutting edge of a growing field of competitors as they started pursuing artificial general intelligence — commonly referred to as AGI — a futuristic superintelligent AI technology.[aside postID=news_12081798 hero='https://cdn.kqed.org/wp-content/uploads/sites/10/2026/04/260428-MUSK-ALTMAN-VB-03-KQED-1.jpg']Altman’s defense has alleged that throughout that process, Musk attempted to “wrest control” of the company twice, first insisting that he hold a majority equity stake in any for-profit entity, control its board of directors and serve as CEO, and later, that OpenAI be folded into Tesla, where he already serves as CEO.\u003c/p>\n\u003cp>Savitt said Musk began withholding $5 million quarterly fund contributions to put pressure on the company to grant his requests, and after those attempts failed, he left the company.\u003c/p>\n\u003cp>Savitt also accused Musk of poaching OpenAI employees as Musk exited in early 2018, including founding member Andrej Karpathy, for 
Tesla.\u003c/p>\n\u003cp>Musk said multiple times that Tesla is not pursuing AGI. But in March, Musk \u003ca href=\"https://x.com/elonmusk/status/2029123591871308272?lang=en\">wrote on the social media platform X\u003c/a> that “Tesla will be one of the companies to make AGI and probably the first to make it in humanoid/atom-shaping form.”\u003c/p>\n\u003cp>Separately, he formed xAI in 2023, which he said is pursuing AGI.\u003c/p>\n\u003cp>He’s downplayed its competitiveness with OpenAI, though, testifying that it has just a couple hundred employees and a “small market share.”\u003c/p>\n\u003cp>“I would say technically competitive, but much smaller than OpenAI,” Musk said Tuesday.\u003c/p>\n\u003cfigure id=\"attachment_10734536\" class=\"wp-caption aligncenter\" style=\"max-width: 1920px\">\u003cimg loading=\"lazy\" decoding=\"async\" class=\"size-full wp-image-10734536\" src=\"https://cdn.kqed.org/wp-content/uploads/sites/10/2015/10/StuartRussell.jpg\" alt='Stuart Russell, UC Berkeley computer science professor and co-author of the standard textbook \"Artificial Intelligence: a Modern Approach.\"' width=\"1920\" height=\"1320\" srcset=\"https://cdn.kqed.org/wp-content/uploads/sites/10/2015/10/StuartRussell.jpg 1920w, https://cdn.kqed.org/wp-content/uploads/sites/10/2015/10/StuartRussell-400x275.jpg 400w, https://cdn.kqed.org/wp-content/uploads/sites/10/2015/10/StuartRussell-800x550.jpg 800w, https://cdn.kqed.org/wp-content/uploads/sites/10/2015/10/StuartRussell-1440x990.jpg 1440w, https://cdn.kqed.org/wp-content/uploads/sites/10/2015/10/StuartRussell-1180x811.jpg 1180w, https://cdn.kqed.org/wp-content/uploads/sites/10/2015/10/StuartRussell-960x660.jpg 960w\" sizes=\"auto, (max-width: 1920px) 100vw, 1920px\">\u003cfigcaption class=\"wp-caption-text\">Stuart Russell, UC Berkeley computer science professor and co-author of the standard textbook “Artificial Intelligence: A Modern Approach.” \u003ccite>(Juan Mabromata/AFP via Getty 
Images)\u003c/cite>\u003c/figcaption>\u003c/figure>\n\u003cp>The three days of Musk’s testimony got testy at times, particularly during Savitt’s cross-examination on Wednesday afternoon, when Savitt and U.S. District Judge Yvonne Gonzalez Rogers asked Musk repeatedly to answer the questions he was asked. Musk accused Savitt of intentionally misleading him.\u003c/p>\n\u003cp>But the most heated moment thus far might have come before the jury was called to the courtroom on Thursday morning, during a discussion about what AI safety expert Stuart Russell, who is taking the stand this afternoon, will be willing to testify to.\u003c/p>\n\u003cp>Musk’s attorney argued he should be allowed to speak about the climate risk associated with AI, saying: “We could all die.”\u003c/p>\n\u003cp>“It is also ironic that your client, despite these risks, is creating a company in the exact space,” Gonzalez Rogers said. “I suspect that there are people who don’t want to put the future in Mr. Musk’s hands.”\u003c/p>\n\u003cp>\u003c/p>\n",
"blocks": [],
"excerpt": "The Tesla CEO said OpenAI betrayed its original mission as a nonprofit. But defense attorneys representing Altman and Microsoft used social media and email evidence to question Musk's own motives for getting involved. ",
"status": "publish",
"parent": 0,
"modified": 1777653621,
"stats": {
"hasAudio": false,
"hasVideo": false,
"hasChartOrMap": false,
"iframeSrcs": [],
"hasGoogleForm": false,
"hasGallery": false,
"hasHearkenModule": false,
"hasPolis": false,
"paragraphCount": 24,
"wordCount": 920
},
"headData": {
"title": "Are Elon Musk and OpenAI Fighting an AI Arms Race? Sam Altman’s Lawyers Think So | KQED",
"description": "The Tesla CEO said OpenAI betrayed its original mission as a nonprofit. But defense attorneys representing Altman and Microsoft used social media and email evidence to question Musk's own motives for getting involved. ",
"ogTitle": "",
"ogDescription": "",
"ogImgId": "",
"twTitle": "",
"twDescription": "",
"twImgId": "",
"schema": {
"@context": "https://schema.org",
"@type": "NewsArticle",
"headline": "Are Elon Musk and OpenAI Fighting an AI Arms Race? Sam Altman’s Lawyers Think So",
"datePublished": "2026-04-30T16:29:37-07:00",
"dateModified": "2026-05-01T09:40:21-07:00",
"image": "https://cdn.kqed.org/wp-content/uploads/2020/02/KQED-OG-Image@1x.png",
"isAccessibleForFree": "True",
"publisher": {
"@type": "NewsMediaOrganization",
"@id": "https://www.kqed.org/#organization",
"name": "KQED",
"logo": "https://cdn.kqed.org/wp-content/uploads/2020/02/KQED-OG-Image@1x.png",
"url": "https://www.kqed.org",
"sameAs": [
"https://www.facebook.com/KQED",
"https://twitter.com/KQED",
"https://www.instagram.com/kqed/",
"https://www.tiktok.com/@kqedofficial",
"https://www.linkedin.com/company/kqed",
"https://www.youtube.com/channel/UCeC0IOo7i1P_61zVUWbJ4nw"
]
}
}
},
"primaryCategory": {
"termId": 248,
"slug": "technology",
"name": "Technology"
},
"sticky": false,
"nprStoryId": "kqed-12081916",
"templateType": "standard",
"featuredImageType": "standard",
"excludeFromSiteSearch": "Include",
"articleAge": "0",
"path": "/news/12081916/are-elon-musk-and-openai-fighting-an-ai-arms-race-sam-altmans-lawyers-think-so",
"audioTrackLength": null,
"parsedContent": [
{
"type": "contentString",
"content": "\u003cdiv class=\"post-body\">\u003cp>\u003cp>As Elon Musk’s dayslong testimony in his \u003ca href=\"https://www.kqed.org/news/12081290/how-to-unscramble-an-omelet-in-silicon-valley-the-musk-v-altman-trial-that-will-try\">case against OpenAI co-founder Sam Altman\u003c/a> came to a close Thursday, defense attorneys aimed to paint the world’s richest man as intent on dominating artificial intelligence — not on protecting the world from it.\u003c/p>\n\u003cp>Under cross-examination in an Oakland court, attorneys for Altman and Microsoft, the company’s largest financial backer and which until this week held the exclusive rights to license and sell its technology, held Musk’s feet to the fire about a number of business moves he’s made — both within and outside of OpenAI — that might give jurors pause about whether he operated so differently from his former colleagues in the race to dominate the field.\u003c/p>\n\u003cp>During hours of testimony, Musk has told the court that he cofounded the nonprofit OpenAI with Altman and OpenAI President Greg Brockman\u003ca href=\"https://www.kqed.org/news/12081603/elon-musk-takes-aim-at-openai-as-trial-begins-its-not-ok-to-steal-a-charity\"> in 2015 altruistically\u003c/a>, fearing the dangers of AI and wanting to ensure that the technology was developed in a safe and open-source way. He brought the suit, he said, after deciding that his co-founders \u003ca href=\"https://www.kqed.org/news/12081798/elon-musk-says-sam-altman-tricked-him-into-funding-openai\">had betrayed that intention\u003c/a> — expanding the company into a tech behemoth valued at $852 billion today.\u003c/p>\n\u003cp>\u003c/p>\u003c/div>",
"attributes": {
"named": {},
"numeric": []
}
},
{
"type": "component",
"content": "",
"name": "ad",
"attributes": {
"named": {
"label": "fullwidth"
},
"numeric": [
"fullwidth"
]
}
},
{
"type": "contentString",
"content": "\u003cdiv class=\"post-body\">\u003cp>\u003c/p>\n\u003cp>But Microsoft attorney Russell Cohen seemed to point to a different motivation: a desire to beat OpenAI and win the AI race.\u003c/p>\n\u003cp>“You didn’t sue Microsoft [and OpenAI] until November 2024, correct?” Cohen said.\u003c/p>\n\u003cp>“Yes,” Musk said.\u003c/p>\n\u003cp>“And that is after you formed your own AI company, xAI, correct?” Cohen said.\u003c/p>\n\u003cp>“Yes,” Musk said.\u003c/p>\n\u003cfigure id=\"attachment_12081637\" class=\"wp-caption aligncenter\" style=\"max-width: 2000px\">\u003cimg loading=\"lazy\" decoding=\"async\" class=\"size-full wp-image-12081637\" src=\"https://cdn.kqed.org/wp-content/uploads/sites/10/2026/04/260428-MUSK-ALTMAN-VB-01-KQED.jpg\" alt=\"\" width=\"2000\" height=\"1125\" srcset=\"https://cdn.kqed.org/wp-content/uploads/sites/10/2026/04/260428-MUSK-ALTMAN-VB-01-KQED.jpg 2000w, https://cdn.kqed.org/wp-content/uploads/sites/10/2026/04/260428-MUSK-ALTMAN-VB-01-KQED-160x90.jpg 160w, https://cdn.kqed.org/wp-content/uploads/sites/10/2026/04/260428-MUSK-ALTMAN-VB-01-KQED-1536x864.jpg 1536w, https://cdn.kqed.org/wp-content/uploads/sites/10/2026/04/260428-MUSK-ALTMAN-VB-01-KQED-1200x675.jpg 1200w\" sizes=\"auto, (max-width: 2000px) 100vw, 2000px\">\u003cfigcaption class=\"wp-caption-text\">OpenAI’s lead counsel William Savitt presents opening statements in the trial in which Elon Musk claims that Sam Altman and OpenAI abandoned their founding promise to develop AI for the benefit of humanity, rather than solely for profit in Oakland on April 28, 2026. 
\u003ccite>(Vicki Behringer for KQED)\u003c/cite>\u003c/figcaption>\u003c/figure>\n\u003cp>The questions came after William Savitt, Altman’s attorney, directed the jury’s attention to a bombshell message Musk sent to Mark Zuckerberg in February 2025, asking whether the Meta CEO would be “open to the idea of bidding on the OpenAI IP,” or intellectual property, with Musk and others.\u003c/p>\n\u003cp>The jury also learned that xAI had partially “distilled,” that is, derived technology from OpenAI’s own models, which violates OpenAI’s terms of service.\u003c/p>\n\u003cp>The pointed inquiries on Thursday came after Musk’s testimony started to bring the events of how OpenAI launched its first for-profit subsidiary into focus. In 2017, executives including Altman, Musk, Brockman and Ilya Sutskever, a top computer scientist at the company since its founding, launched discussions about creating a for-profit subsidiary.\u003c/p>\n\u003cp>It would be, they said, a way to bring in additional funding and keep at the cutting edge of a growing field of competitors as they started pursuing artificial general intelligence — commonly referred to as AGI — a futuristic superintelligent AI technology.\u003c/p>\u003c/div>",
"attributes": {
"named": {},
"numeric": []
}
},
{
"type": "component",
"content": "",
"name": "aside",
"attributes": {
"named": {
"postid": "news_12081798",
"hero": "https://cdn.kqed.org/wp-content/uploads/sites/10/2026/04/260428-MUSK-ALTMAN-VB-03-KQED-1.jpg",
"label": ""
},
"numeric": []
}
},
{
"type": "contentString",
"content": "\u003cdiv class=\"post-body\">\u003cp>Altman’s defense has alleged that throughout that process, Musk attempted to “wrest control” of the company twice, first insisting that he hold a majority equity stake in any for-profit entity, control its board of directors and serve as CEO, and later, that OpenAI be folded into Tesla, where he already serves as CEO.\u003c/p>\n\u003cp>Savitt said Musk began withholding $5 million quarterly fund contributions to put pressure on the company to grant his requests, and after those attempts failed, he left the company.\u003c/p>\n\u003cp>Savitt also accused Musk of poaching OpenAI employees as Musk exited in early 2018, including founding member Andrej Karpathy, for Tesla.\u003c/p>\n\u003cp>Musk said multiple times that Tesla is not pursuing AGI. But in March, Musk \u003ca href=\"https://x.com/elonmusk/status/2029123591871308272?lang=en\">wrote on the social media platform X\u003c/a> that “Tesla will be one of the companies to make AGI and probably the first to make it in humanoid/atom-shaping form.”\u003c/p>\n\u003cp>Separately, he formed xAI in 2023, which he said is pursuing AGI.\u003c/p>\n\u003cp>He’s downplayed its competitiveness with OpenAI, though, testifying that it has just a couple hundred employees and a “small market share.”\u003c/p>\n\u003cp>“I would say technically competitive, but much smaller than OpenAI,” Musk said Tuesday.\u003c/p>\n\u003cfigure id=\"attachment_10734536\" class=\"wp-caption aligncenter\" style=\"max-width: 1920px\">\u003cimg loading=\"lazy\" decoding=\"async\" class=\"size-full wp-image-10734536\" src=\"https://cdn.kqed.org/wp-content/uploads/sites/10/2015/10/StuartRussell.jpg\" alt='Stuart Russell, UC Berkeley computer science professor and co-author of the standard textbook \"Artificial Intelligence: a Modern Approach.\"' width=\"1920\" height=\"1320\" srcset=\"https://cdn.kqed.org/wp-content/uploads/sites/10/2015/10/StuartRussell.jpg 1920w, 
https://cdn.kqed.org/wp-content/uploads/sites/10/2015/10/StuartRussell-400x275.jpg 400w, https://cdn.kqed.org/wp-content/uploads/sites/10/2015/10/StuartRussell-800x550.jpg 800w, https://cdn.kqed.org/wp-content/uploads/sites/10/2015/10/StuartRussell-1440x990.jpg 1440w, https://cdn.kqed.org/wp-content/uploads/sites/10/2015/10/StuartRussell-1180x811.jpg 1180w, https://cdn.kqed.org/wp-content/uploads/sites/10/2015/10/StuartRussell-960x660.jpg 960w\" sizes=\"auto, (max-width: 1920px) 100vw, 1920px\">\u003cfigcaption class=\"wp-caption-text\">Stuart Russell, UC Berkeley computer science professor and co-author of the standard textbook “Artificial Intelligence: A Modern Approach.” \u003ccite>(Juan Mabromata/AFP via Getty Images)\u003c/cite>\u003c/figcaption>\u003c/figure>\n\u003cp>The three days of Musk’s testimony got testy at times, particularly during Savitt’s cross-examination on Wednesday afternoon, when Savitt and U.S. District Judge Yvonne Gonzalez Rogers asked Musk repeatedly to answer the questions he was asked. Musk accused Savitt of intentionally misleading him.\u003c/p>\n\u003cp>But the most heated moment thus far might have come before the jury was called to the courtroom on Thursday morning, during a discussion about what AI safety expert Stuart Russell, who is taking the stand this afternoon, will be willing to testify to.\u003c/p>\n\u003cp>Musk’s attorney argued he should be allowed to speak about the climate risk associated with AI, saying: “We could all die.”\u003c/p>\n\u003cp>“It is also ironic that your client, despite these risks, is creating a company in the exact space,” Gonzalez Rogers said. “I suspect that there are people who don’t want to put the future in Mr. Musk’s hands.”\u003c/p>\n\u003cp>\u003c/p>\n\u003c/div>\u003c/p>",
"attributes": {
"named": {},
"numeric": []
}
}
],
"link": "/news/12081916/are-elon-musk-and-openai-fighting-an-ai-arms-race-sam-altmans-lawyers-think-so",
"authors": [
"11913",
"251"
],
"categories": [
"news_28250",
"news_8",
"news_248"
],
"tags": [
"news_1386",
"news_3897",
"news_27626",
"news_34054",
"news_33542",
"news_33543",
"news_34586",
"news_1631"
],
"featImg": "news_12081606",
"label": "news"
},
"news_12081798": {
"type": "posts",
"id": "news_12081798",
"meta": {
"index": "posts_1716263798",
"site": "news",
"id": "12081798",
"score": null,
"sort": [
1777507270000
]
},
"guestAuthors": [],
"slug": "elon-musk-says-sam-altman-tricked-him-into-funding-openai",
"title": "Elon Musk Says Sam Altman Tricked Him Into Funding OpenAI",
"publishDate": 1777507270,
"format": "standard",
"headTitle": "Elon Musk Says Sam Altman Tricked Him Into Funding OpenAI | KQED",
"labelTerm": {
"site": "news"
},
"content": "\u003cp>During the second day of the \u003ca href=\"https://www.kqed.org/news/12081603/elon-musk-takes-aim-at-openai-as-trial-begins-its-not-ok-to-steal-a-charity\">landmark trial between Sam Altman and Elon Musk\u003c/a>, the Tesla founder told the Oakland courthouse that he was a “fool” to fund OpenAI through its early years.\u003c/p>\n\u003cp>Testifying in the lawsuit he brought against Altman, which claims the company’s creators betrayed their mission for profits, Musk suggested Wednesday that Altman and cofounder Greg Brockman wanted to “have your cake and eat it too.”\u003c/p>\n\u003cp>“If you go nonprofit, you’ve got a sort of moral high ground,” he testified.\u003c/p>\n\u003cp>Musk’s testimony tells one version of founding OpenAI: that he, fearing the dangers of artificial intelligence, pursued its development with the goal of benefiting the common good, alongside, he thought, like-minded collaborators. But behind the scenes, those cofounders engaged in a “long con” to profit at his expense.\u003c/p>\n\u003cp>[ad fullwidth]\u003c/p>\n\u003cp>“What they really wanted was a for-profit, where they could make as much money as possible,” Musk said later.\u003c/p>\n\u003cp>Whether the jury believes him will be integral to the decision they’re tasked with making, as they determine whether OpenAI breached charitable trust and engaged in unjust enrichment as it evolved from a nonprofit organization to its current $730 billion iteration.\u003c/p>\n\u003cp>Under cross-examination, Altman’s attorney, William Savitt, questioned Musk’s story and credibility as an altruistic benefactor. 
He pointed to an email Musk sent to Altman in 2015, which said it would be “probably better” if OpenAI operated as a for-profit company with a parallel nonprofit.\u003c/p>\n\u003cfigure id=\"attachment_12081637\" class=\"wp-caption aligncenter\" style=\"max-width: 2000px\">\u003cimg loading=\"lazy\" decoding=\"async\" class=\"size-full wp-image-12081637\" src=\"https://cdn.kqed.org/wp-content/uploads/sites/10/2026/04/260428-MUSK-ALTMAN-VB-01-KQED.jpg\" alt=\"\" width=\"2000\" height=\"1125\" srcset=\"https://cdn.kqed.org/wp-content/uploads/sites/10/2026/04/260428-MUSK-ALTMAN-VB-01-KQED.jpg 2000w, https://cdn.kqed.org/wp-content/uploads/sites/10/2026/04/260428-MUSK-ALTMAN-VB-01-KQED-160x90.jpg 160w, https://cdn.kqed.org/wp-content/uploads/sites/10/2026/04/260428-MUSK-ALTMAN-VB-01-KQED-1536x864.jpg 1536w, https://cdn.kqed.org/wp-content/uploads/sites/10/2026/04/260428-MUSK-ALTMAN-VB-01-KQED-1200x675.jpg 1200w\" sizes=\"auto, (max-width: 2000px) 100vw, 2000px\">\u003cfigcaption class=\"wp-caption-text\">OpenAI’s lead counsel, William Savitt, presents opening statements in the trial in which Elon Musk claims that Sam Altman and OpenAI abandoned their founding promise to develop AI for the benefit of humanity, rather than solely for profit, in Oakland on April 28, 2026. \u003ccite>(Vicki Behringer for KQED)\u003c/cite>\u003c/figcaption>\u003c/figure>\n\u003cp>In another email sent to colleagues at his neurotechnology company, Neuralink, Musk said that Google’s AI development was moving very fast, and that he was concerned OpenAI was not on the path to catch up.\u003c/p>\n\u003cp>“Setting it up as a nonprofit might, in hindsight, have been the wrong move,” Musk wrote. “Sense of urgency is not as high.”\u003c/p>\n\u003cp>Savitt asked if, in 2017, Musk suggested at a party that OpenAI should create a for-profit. 
He said it was just after the company’s AI model had beaten \u003cem>Defense of the Ancients, \u003c/em>a battle video game, which was a pivotal moment in the development process.\u003c/p>\n\u003cp>Musk said he didn’t remember giving instructions to create a for-profit at the time.\u003c/p>\n\u003cp>“This was nine years ago,” he said.[aside postID=news_12081603 hero='https://cdn.kqed.org/wp-content/uploads/sites/10/2026/04/260428-MUSK-ALTMAN-VB-02-KQED.jpg']Savitt said Tuesday that in 2017, OpenAI executives, including Musk, were in the midst of conversations about whether and how to transition the company to a for-profit structure.\u003c/p>\n\u003cp>According to OpenAI’s court filings, as early as summer 2017, Musk had insisted on holding a majority equity stake in any for-profit entity, serving as CEO and controlling its board of directors.\u003c/p>\n\u003cp>Pressed by Savitt about what Musk meant by “expressing what you said about control,” the Tesla founder and billionaire said: “I try to be as literal as possible.”\u003c/p>\n\u003cp>In the fall of 2017, Brockman and Ilya Sutskever, another top OpenAI executive, emailed Musk with concerns about the for-profit structure he proposed. Shortly thereafter, discussions over the structure collapsed, and Musk stopped making significant quarterly funding contributions, OpenAI alleges.\u003c/p>\n\u003cp>He left the company less than six months later.\u003c/p>\n\u003cp>Savitt framed the breakdown and Musk’s exit as a result of his not getting control of the for-profit, and the other executives’ focus on maintaining its philanthropic mission. 
He suggested that Musk tried to pressure them to accept his terms by pausing the majority of his financial backing.\u003c/p>\n\u003cp>“You knew that would create financial pressure for the organization,” Savitt said.\u003c/p>\n\u003cfigure id=\"attachment_12081686\" class=\"wp-caption aligncenter\" style=\"max-width: 2000px\">\u003cimg loading=\"lazy\" decoding=\"async\" class=\"size-full wp-image-12081686\" src=\"https://cdn.kqed.org/wp-content/uploads/sites/10/2026/04/260428-MUSK-ALTMAN-VB-04-KQED.jpg\" alt=\"\" width=\"2000\" height=\"1125\" srcset=\"https://cdn.kqed.org/wp-content/uploads/sites/10/2026/04/260428-MUSK-ALTMAN-VB-04-KQED.jpg 2000w, https://cdn.kqed.org/wp-content/uploads/sites/10/2026/04/260428-MUSK-ALTMAN-VB-04-KQED-160x90.jpg 160w, https://cdn.kqed.org/wp-content/uploads/sites/10/2026/04/260428-MUSK-ALTMAN-VB-04-KQED-1536x864.jpg 1536w, https://cdn.kqed.org/wp-content/uploads/sites/10/2026/04/260428-MUSK-ALTMAN-VB-04-KQED-1200x675.jpg 1200w\" sizes=\"auto, (max-width: 2000px) 100vw, 2000px\">\u003cfigcaption class=\"wp-caption-text\">Steve Molo, Elon Musk’s attorney, presents opening statements in the trial in which Elon Musk (center-right) claims that Sam Altman (right) and OpenAI abandoned their founding promise to develop AI for the benefit of humanity, rather than solely for profit, in Oakland on April 28, 2026. \u003ccite>(Vicki Behringer for KQED)\u003c/cite>\u003c/figcaption>\u003c/figure>\n\u003cp>Musk denied that was his intention. 
Instead, he alleged that Altman convinced Brockman and the others to go against his proposal, and that their concern over his desire for control was disingenuous.\u003c/p>\n\u003cp>“I’m not going to fund something if I don’t have confidence in the people,” he said.\u003c/p>\n\u003cp>When asked whether he proposed that OpenAI be folded into Tesla, Musk said: “There were a lot of ideas that were brainstormed at the time.”\u003c/p>\n\u003cp>In an email, he wrote that doing so would be the “only path that could even hope to hold a candle to Google.”\u003c/p>\n\u003cp>Musk said he left OpenAI in February 2018 because he was focused on Tesla’s survival, and believed that OpenAI intended to continue operating as a nonprofit.\u003c/p>\n\u003cp>Savitt also laid out a series of exchanges between Musk and Altman, in which the OpenAI CEO kept him apprised of the company’s corporate structure. He said in March 2018, Musk responded to an email that noted the creation of a for-profit entity of OpenAI with “OK by me,” and was sent a term sheet for OpenAI LP that summer.[aside postID=news_12081290 hero='https://cdn.kqed.org/wp-content/uploads/sites/10/2026/04/260422-ALTMANMUSK-MD-01-KQED.jpg']Savitt also said Altman emailed Musk a draft of the company’s public announcement of its for-profit arm in March 2019, and texted him asking if he had time to talk about Microsoft’s plan to invest in OpenAI. Musk never responded to that text, according to Savitt.\u003c/p>\n\u003cp>Musk said he was busy with his other companies in 2018, and while he was aware that it had added a for-profit entity, he hadn’t lost complete faith in the company. 
While he’d suspended quarterly $5 million funding contributions prior to his departure, he continued to make some contributions until 2020.\u003c/p>\n\u003cp>He said that he’d gone from enthusiastically supportive to uncertain about OpenAI’s mission, but that he’d fully suspended his contributions when he felt that the company was “deliberately not a nonprofit.”\u003c/p>\n\u003cp>When asked why he waited until 2024 to bring the suit, Musk said that’s when he determined OpenAI breached charitable trust.\u003c/p>\n\u003cp>“Thinking that someone might steal your car is not the same as [if] someone has stolen your car,” Musk said. He said after enlisting his attorney, Alex Spiro, to investigate, he heard from him in 2023 that “the car had been stolen.”\u003c/p>\n\u003cp>“I would have sued sooner if I thought the charity had been stolen sooner,” Musk continued.\u003c/p>\n\u003cp>The trial and Musk’s testimony are expected to continue on Thursday.\u003c/p>\n\u003cp>[ad floatright]\u003c/p>\n",
"blocks": [],
"excerpt": "On the second day of a trial pitting the Tesla founder against OpenAI, Elon Musk said he was a “fool” to support the company behind ChatGPT during its early years.",
"status": "publish",
"parent": 0,
"modified": 1777509912,
"stats": {
"hasAudio": false,
"hasVideo": false,
"hasChartOrMap": false,
"iframeSrcs": [],
"hasGoogleForm": false,
"hasGallery": false,
"hasHearkenModule": false,
"hasPolis": false,
"paragraphCount": 32,
"wordCount": 1208
},
"headData": {
"title": "Elon Musk Says Sam Altman Tricked Him Into Funding OpenAI | KQED",
"description": "On the second day of a trial pitting the Tesla founder against OpenAI, Elon Musk said he was a “fool” to support the company behind ChatGPT during its early years.",
"ogTitle": "",
"ogDescription": "",
"ogImgId": "",
"twTitle": "",
"twDescription": "",
"twImgId": "",
"schema": {
"@context": "https://schema.org",
"@type": "NewsArticle",
"headline": "Elon Musk Says Sam Altman Tricked Him Into Funding OpenAI",
"datePublished": "2026-04-29T17:01:10-07:00",
"dateModified": "2026-04-29T17:45:12-07:00",
"image": "https://cdn.kqed.org/wp-content/uploads/2020/02/KQED-OG-Image@1x.png",
"isAccessibleForFree": "True",
"publisher": {
"@type": "NewsMediaOrganization",
"@id": "https://www.kqed.org/#organization",
"name": "KQED",
"logo": "https://cdn.kqed.org/wp-content/uploads/2020/02/KQED-OG-Image@1x.png",
"url": "https://www.kqed.org",
"sameAs": [
"https://www.facebook.com/KQED",
"https://twitter.com/KQED",
"https://www.instagram.com/kqed/",
"https://www.tiktok.com/@kqedofficial",
"https://www.linkedin.com/company/kqed",
"https://www.youtube.com/channel/UCeC0IOo7i1P_61zVUWbJ4nw"
]
}
}
},
"primaryCategory": {
"termId": 248,
"slug": "technology",
"name": "Technology"
},
"sticky": false,
"nprStoryId": "kqed-12081798",
"templateType": "standard",
"featuredImageType": "standard",
"excludeFromSiteSearch": "Include",
"articleAge": "0",
"path": "/news/12081798/elon-musk-says-sam-altman-tricked-him-into-funding-openai",
"audioTrackLength": null,
"parsedContent": [
{
"type": "contentString",
"content": "\u003cdiv class=\"post-body\">\u003cp>\u003cp>During the second day of the \u003ca href=\"https://www.kqed.org/news/12081603/elon-musk-takes-aim-at-openai-as-trial-begins-its-not-ok-to-steal-a-charity\">landmark trial between Sam Altman and Elon Musk\u003c/a>, the Tesla founder told the Oakland courthouse that he was a “fool” to fund OpenAI through its early years.\u003c/p>\n\u003cp>Testifying in the lawsuit he brought against Altman, which claims the company’s creators betrayed their mission for profits, Musk suggested Wednesday that Altman and cofounder Greg Brockman wanted to “have your cake and eat it too.”\u003c/p>\n\u003cp>“If you go nonprofit, you’ve got a sort of moral high ground,” he testified.\u003c/p>\n\u003cp>Musk’s testimony tells one version of founding OpenAI: that he, fearing the dangers of artificial intelligence, pursued its development with the goal of benefiting the common good, alongside, he thought, like-minded collaborators. But behind the scenes, those cofounders engaged in a “long con” to profit at his expense.\u003c/p>\n\u003cp>\u003c/p>\u003c/div>",
"attributes": {
"named": {},
"numeric": []
}
},
{
"type": "component",
"content": "",
"name": "ad",
"attributes": {
"named": {
"label": "fullwidth"
},
"numeric": [
"fullwidth"
]
}
},
{
"type": "contentString",
"content": "\u003cdiv class=\"post-body\">\u003cp>\u003c/p>\n\u003cp>“What they really wanted was a for-profit, where they could make as much money as possible,” Musk said later.\u003c/p>\n\u003cp>Whether the jury believes him will be integral to the decision they’re tasked with making, as they determine whether OpenAI breached charitable trust and engaged in unjust enrichment as it evolved from a nonprofit organization to its current $730 billion iteration.\u003c/p>\n\u003cp>Under cross-examination, Altman’s attorney, William Savitt, questioned Musk’s story and credibility as an altruistic benefactor. He pointed to an email Musk sent to Altman in 2015, which said it would be “probably better” if OpenAI operated as a for-profit company with a parallel nonprofit.\u003c/p>\n\u003cfigure id=\"attachment_12081637\" class=\"wp-caption aligncenter\" style=\"max-width: 2000px\">\u003cimg loading=\"lazy\" decoding=\"async\" class=\"size-full wp-image-12081637\" src=\"https://cdn.kqed.org/wp-content/uploads/sites/10/2026/04/260428-MUSK-ALTMAN-VB-01-KQED.jpg\" alt=\"\" width=\"2000\" height=\"1125\" srcset=\"https://cdn.kqed.org/wp-content/uploads/sites/10/2026/04/260428-MUSK-ALTMAN-VB-01-KQED.jpg 2000w, https://cdn.kqed.org/wp-content/uploads/sites/10/2026/04/260428-MUSK-ALTMAN-VB-01-KQED-160x90.jpg 160w, https://cdn.kqed.org/wp-content/uploads/sites/10/2026/04/260428-MUSK-ALTMAN-VB-01-KQED-1536x864.jpg 1536w, https://cdn.kqed.org/wp-content/uploads/sites/10/2026/04/260428-MUSK-ALTMAN-VB-01-KQED-1200x675.jpg 1200w\" sizes=\"auto, (max-width: 2000px) 100vw, 2000px\">\u003cfigcaption class=\"wp-caption-text\">OpenAI’s lead counsel, William Savitt, presents opening statements in the trial in which Elon Musk claims that Sam Altman and OpenAI abandoned their founding promise to develop AI for the benefit of humanity, rather than solely for profit, in Oakland on April 28, 2026. 
\u003ccite>(Vicki Behringer for KQED)\u003c/cite>\u003c/figcaption>\u003c/figure>\n\u003cp>In another email sent to colleagues at his neurotechnology company, Neuralink, Musk said that Google’s AI development was moving very fast, and that he was concerned OpenAI was not on the path to catch up.\u003c/p>\n\u003cp>“Setting it up as a nonprofit might, in hindsight, have been the wrong move,” Musk wrote. “Sense of urgency is not as high.”\u003c/p>\n\u003cp>Savitt asked if, in 2017, Musk suggested at a party that OpenAI should create a for-profit. He said it was just after the company’s AI model had beaten \u003cem>Defense of the Ancients, \u003c/em>a battle video game, which was a pivotal moment in the development process.\u003c/p>\n\u003cp>Musk said he didn’t remember giving instructions to create a for-profit at the time.\u003c/p>\n\u003cp>“This was nine years ago,” he said.\u003c/p>\u003c/div>",
"attributes": {
"named": {},
"numeric": []
}
},
{
"type": "component",
"content": "",
"name": "aside",
"attributes": {
"named": {
"postid": "news_12081603",
"hero": "https://cdn.kqed.org/wp-content/uploads/sites/10/2026/04/260428-MUSK-ALTMAN-VB-02-KQED.jpg",
"label": ""
},
"numeric": []
}
},
{
"type": "contentString",
"content": "\u003cdiv class=\"post-body\">\u003cp>Savitt said Tuesday that in 2017, OpenAI executives, including Musk, were in the midst of conversations about whether and how to transition the company to a for-profit structure.\u003c/p>\n\u003cp>According to OpenAI’s court filings, as early as summer 2017, Musk had insisted on holding a majority equity stake in any for-profit entity, serving as CEO and controlling its board of directors.\u003c/p>\n\u003cp>Pressed by Savitt about what Musk meant by “expressing what you said about control,” the Tesla founder and billionaire said: “I try to be as literal as possible.”\u003c/p>\n\u003cp>In the fall of 2017, Brockman and Ilya Sutskever, another top OpenAI executive, emailed Musk with concerns about the for-profit structure he proposed. Shortly thereafter, discussions over the structure collapsed, and Musk stopped making significant quarterly funding contributions, OpenAI alleges.\u003c/p>\n\u003cp>He left the company less than six months later.\u003c/p>\n\u003cp>Savitt framed the breakdown and Musk’s exit as a result of his not getting control of the for-profit, and the other executives’ focus on maintaining its philanthropic mission. 
He suggested that Musk tried to pressure them to accept his terms by pausing the majority of his financial backing.\u003c/p>\n\u003cp>“You knew that would create financial pressure for the organization,” Savitt said.\u003c/p>\n\u003cfigure id=\"attachment_12081686\" class=\"wp-caption aligncenter\" style=\"max-width: 2000px\">\u003cimg loading=\"lazy\" decoding=\"async\" class=\"size-full wp-image-12081686\" src=\"https://cdn.kqed.org/wp-content/uploads/sites/10/2026/04/260428-MUSK-ALTMAN-VB-04-KQED.jpg\" alt=\"\" width=\"2000\" height=\"1125\" srcset=\"https://cdn.kqed.org/wp-content/uploads/sites/10/2026/04/260428-MUSK-ALTMAN-VB-04-KQED.jpg 2000w, https://cdn.kqed.org/wp-content/uploads/sites/10/2026/04/260428-MUSK-ALTMAN-VB-04-KQED-160x90.jpg 160w, https://cdn.kqed.org/wp-content/uploads/sites/10/2026/04/260428-MUSK-ALTMAN-VB-04-KQED-1536x864.jpg 1536w, https://cdn.kqed.org/wp-content/uploads/sites/10/2026/04/260428-MUSK-ALTMAN-VB-04-KQED-1200x675.jpg 1200w\" sizes=\"auto, (max-width: 2000px) 100vw, 2000px\">\u003cfigcaption class=\"wp-caption-text\">Steve Molo, Elon Musk’s attorney, presents opening statements in the trial in which Elon Musk (center-right) claims that Sam Altman (right) and OpenAI abandoned their founding promise to develop AI for the benefit of humanity, rather than solely for profit, in Oakland on April 28, 2026. \u003ccite>(Vicki Behringer for KQED)\u003c/cite>\u003c/figcaption>\u003c/figure>\n\u003cp>Musk denied that was his intention. 
Instead, he alleged that Altman convinced Brockman and the others to go against his proposal, and that their concern over his desire for control was disingenuous.\u003c/p>\n\u003cp>“I’m not going to fund something if I don’t have confidence in the people,” he said.\u003c/p>\n\u003cp>When asked whether he proposed that OpenAI be folded into Tesla, Musk said: “There were a lot of ideas that were brainstormed at the time.”\u003c/p>\n\u003cp>In an email, he wrote that doing so would be the “only path that could even hope to hold a candle to Google.”\u003c/p>\n\u003cp>Musk said he left OpenAI in February 2018 because he was focused on Tesla’s survival, and believed that OpenAI intended to continue operating as a nonprofit.\u003c/p>\n\u003cp>Savitt also laid out a series of exchanges between Musk and Altman, in which the OpenAI CEO kept him apprised of the company’s corporate structure. He said in March 2018, Musk responded to an email that noted the creation of a for-profit entity of OpenAI with “OK by me,” and was sent a term sheet for OpenAI LP that summer.\u003c/p>\u003c/div>",
"attributes": {
"named": {},
"numeric": []
}
},
{
"type": "component",
"content": "",
"name": "aside",
"attributes": {
"named": {
"postid": "news_12081290",
"hero": "https://cdn.kqed.org/wp-content/uploads/sites/10/2026/04/260422-ALTMANMUSK-MD-01-KQED.jpg",
"label": ""
},
"numeric": []
}
},
{
"type": "contentString",
"content": "\u003cdiv class=\"post-body\">\u003cp>Savitt also said Altman emailed Musk a draft of the company’s public announcement of its for-profit arm in March 2019, and texted him asking if he had time to talk about Microsoft’s plan to invest in OpenAI. Musk never responded to that text, according to Savitt.\u003c/p>\n\u003cp>Musk said he was busy with his other companies in 2018, and while he was aware that it had added a for-profit entity, he hadn’t lost complete faith in the company. While he’d suspended quarterly $5 million funding contributions prior to his departure, he continued to make some contributions until 2020.\u003c/p>\n\u003cp>He said that he’d gone from enthusiastically supportive to uncertain about OpenAI’s mission, but that he’d fully suspended his contributions when he felt that the company was “deliberately not a nonprofit.”\u003c/p>\n\u003cp>When asked why he waited until 2024 to bring the suit, Musk said that’s when he determined OpenAI breached charitable trust.\u003c/p>\n\u003cp>“Thinking that someone might steal your car is not the same as [if] someone has stolen your car,” Musk said. He said after enlisting his attorney, Alex Spiro, to investigate, he heard from him in 2023 that “the car had been stolen.”\u003c/p>\n\u003cp>“I would have sued sooner if I thought the charity had been stolen sooner,” Musk continued.\u003c/p>\n\u003cp>The trial and Musk’s testimony are expected to continue on Thursday.\u003c/p>\n\u003cp>\u003c/p>\u003c/div>",
"attributes": {
"named": {},
"numeric": []
}
},
{
"type": "component",
"content": "",
"name": "ad",
"attributes": {
"named": {
"label": "floatright"
},
"numeric": [
"floatright"
]
}
},
{
"type": "contentString",
"content": "\u003cdiv class=\"post-body\">\u003cp>\u003c/p>\n\u003c/div>",
"attributes": {
"named": {},
"numeric": []
}
}
],
"link": "/news/12081798/elon-musk-says-sam-altman-tricked-him-into-funding-openai",
"authors": [
"11913",
"251"
],
"categories": [
"news_31795",
"news_6188",
"news_8",
"news_248"
],
"tags": [
"news_34755",
"news_32668",
"news_3897",
"news_27626",
"news_19954",
"news_21891",
"news_34054",
"news_33542",
"news_33543",
"news_34586",
"news_1631",
"news_57"
],
"featImg": "news_12081681",
"label": "news"
},
"news_12081721": {
"type": "posts",
"id": "news_12081721",
"meta": {
"index": "posts_1716263798",
"site": "news",
"id": "12081721",
"score": null,
"sort": [
1777456853000
]
},
"guestAuthors": [],
"slug": "somebodys-watching-me-the-crackdown-on-stalkerware",
"title": "Somebody’s Watching Me: The Crackdown on Stalkerware",
"publishDate": 1777456853,
"format": "audio",
"headTitle": "Somebody’s Watching Me: The Crackdown on Stalkerware | KQED",
"labelTerm": {},
"content": "\u003cp>\u003ca href=\"#episode-transcript\">\u003ci>View the full episode transcript.\u003c/i>\u003c/a>\u003c/p>\n\u003cp>\u003cspan style=\"font-weight: 400\">In 2018, researcher Eva Galperin made a discovery about a colleague. He had been sexually abusing women for decades, and threatening to expose their private information using “stalkerware” — hidden applications that allow people to spy on another person’s private life through their mobile device. This set Eva on a new path. She went on to found the Coalition Against Stalkerware, a network of researchers and advocacy groups working to limit the spread of stalkerware and support survivors of tech-enabled abuse. \u003c/span>\u003c/p>\n\u003cp>\u003cspan style=\"font-weight: 400\">Eva joins Morgan to talk about how her background in cybersecurity allowed her to help countless survivors of stalkerware abuse, and how activists and researchers are beginning to turn the tide against a sprawling, largely hidden industry. \u003c/span>\u003c/p>\n\u003cp>\u003c!-- iframe plugin v.4.3 wordpress.org/plugins/iframe/ -->\u003cbr>\n\u003ciframe loading=\"lazy\" frameborder=\"0\" height=\"200\" scrolling=\"no\" src=\"https://playlist.megaphone.fm?e=KQINC4327771430\" width=\"100%\" class=\"iframe-class\">\u003c/iframe>\u003c/p>\n\u003cp>\u003cstrong>Guest:\u003c/strong>\u003c/p>\n\u003cul>\n\u003cli>\u003ca href=\"https://www.eff.org/about/staff/eva-galperin\">\u003cspan style=\"font-weight: 400\">Eva Galperin\u003c/span>\u003c/a>\u003cspan style=\"font-weight: 400\">, director of cybersecurity at the Electronic Frontier Foundation\u003c/span>\u003c/li>\n\u003c/ul>\n\u003cp>\u003cb>Further Reading/Listening:\u003c/b>\u003c/p>\n\u003cul>\n\u003cli>\u003ca href=\"https://stopstalkerware.org/\">\u003cspan style=\"font-weight: 400\">What is stalkerware?\u003c/span>\u003c/a>\u003cspan style=\"font-weight: 400\"> — \u003c/span>\u003ci>\u003cspan style=\"font-weight: 400\">Coalition Against Stalkerware 
\u003c/span>\u003c/i>\u003c/li>\n\u003cli>\u003ci>\u003c/i>\u003ca href=\"https://techcrunch.com/2026/02/09/hacked-leaked-exposed-why-you-should-stop-using-stalkerware-apps/\">\u003cspan style=\"font-weight: 400\">Hacked, leaked, exposed: Why you should never use stalkerware apps\u003c/span>\u003c/a>\u003cspan style=\"font-weight: 400\"> — Lorenzo Franceschi-Bicchierai, \u003c/span>\u003ci>\u003cspan style=\"font-weight: 400\">TechCrunch \u003c/span>\u003c/i>\u003c/li>\n\u003cli>\u003ci>\u003c/i>\u003ca href=\"https://www.theverge.com/2018/2/21/17035552/sexual-assault-harassment-whisper-network-reporting-failure-marquis-boire\">\u003cspan style=\"font-weight: 400\">When whisper networks let us down\u003c/span>\u003c/a>\u003cspan style=\"font-weight: 400\"> — Sarah Jeong, \u003c/span>\u003ci>\u003cspan style=\"font-weight: 400\">The Verge\u003c/span>\u003c/i>\u003c/li>\n\u003cli>\u003ci>\u003c/i>\u003ca href=\"https://www.vice.com/en/article/spyware-company-spyfone-terabytes-data-exposed-online-leak/\">\u003cspan style=\"font-weight: 400\">Spyware Company Leaves ‘Terabytes’ of Selfies, Text Messages, and Location Data Exposed Online\u003c/span>\u003c/a>\u003cspan style=\"font-weight: 400\"> — Lorenzo Franceschi-Bicchierai, \u003c/span>\u003ci>\u003cspan style=\"font-weight: 400\">Vice \u003c/span>\u003c/i>\u003c/li>\n\u003cli>\u003ci>\u003c/i>\u003ca href=\"https://techcrunch.com/2021/10/19/stalkerware-security-phone-data-thousands/\">\u003cspan style=\"font-weight: 400\">A massive ‘stalkerware’ leak puts the phone data of thousands at risk \u003c/span>\u003c/a>\u003cspan style=\"font-weight: 400\"> — Zack Whittaker, \u003c/span>\u003ci>\u003cspan style=\"font-weight: 400\">TechCrunch \u003c/span>\u003c/i>\u003c/li>\n\u003cli>\u003ci>\u003c/i>\u003ca href=\"https://techcrunch.com/2022/12/17/support-king-ftc-spytrac/\">\u003cspan style=\"font-weight: 400\">Support King, banned by FTC, linked to new phone spying operation\u003c/span>\u003c/a>\u003cspan 
style=\"font-weight: 400\"> — Zack Whittaker, \u003c/span>\u003ci>\u003cspan style=\"font-weight: 400\">TechCrunch \u003c/span>\u003c/i>\u003c/li>\n\u003cli>\u003ci>\u003c/i>\u003ca href=\"https://www.eff.org/deeplinks/2025/11/eff-teams-av-comparatives-test-android-stalkerware-detection-major-antivirus-apps\">EFF Teams Up With AV Comparatives to Test Android Stalkerware Detection by Major Antivirus Apps \u003c/a>— Eva Galperin, \u003ci>Electronic Frontier Foundation\u003c/i>\u003c/li>\n\u003c/ul>\n\u003cp>Want to give us feedback on the show? Shoot us an email at \u003ca href=\"mailto:CloseAllTabs@KQED.org\">CloseAllTabs@KQED.org\u003c/a>\u003c/p>\n\u003cp>\u003cspan style=\"font-weight: 400\">Follow us on\u003c/span>\u003ca href=\"https://www.instagram.com/closealltabspod/\"> \u003cspan style=\"font-weight: 400\">Instagram\u003c/span>\u003c/a>\u003cspan style=\"font-weight: 400\"> and\u003c/span>\u003ca href=\"https://www.tiktok.com/@closealltabs\"> \u003cspan style=\"font-weight: 400\">TikTok\u003c/span>\u003c/a>\u003c/p>\n\u003cp>[ad fullwidth]\u003c/p>\n\u003ch2 id=\"episode-transcript\">Episode Transcript\u003c/h2>\n\u003cp>\u003ci>\u003cspan style=\"font-weight: 400\">A full transcript will be available 1–2 workdays after the episode’s publication.\u003c/span>\u003c/i>\u003c/p>\n\u003cp>\u003c/p>\n",
"blocks": [],
"excerpt": "Eva Galperin talks about how activists and researchers are fighting back against the shadowy stalkerware industry.",
"status": "publish",
"parent": 0,
"modified": 1777451777,
"stats": {
"hasAudio": true,
"hasVideo": false,
"hasChartOrMap": false,
"iframeSrcs": [],
"hasGoogleForm": false,
"hasGallery": false,
"hasHearkenModule": false,
"hasPolis": false,
"paragraphCount": 11,
"wordCount": 302
},
"headData": {
"title": "Somebody’s Watching Me: The Crackdown on Stalkerware | KQED",
"description": "In 2018, researcher Eva Galperin made a discovery about a colleague. He had been sexually abusing women for decades, and threatening to expose their private information using “stalkerware” — hidden applications that allow people to spy on another person’s private life through their mobile device. This set Eva on a new path. She went on to found the Coalition Against Stalkerware, a network of researchers and advocacy groups working to limit the spread of stalkerware and support survivors of tech-enabled abuse. Eva joins Morgan to talk about how her background in cybersecurity allowed her to help countless survivors of stalkerware abuse, and how activists and researchers are beginning to turn the tide against a sprawling, largely hidden industry.",
"ogTitle": "",
"ogDescription": "",
"ogImgId": "",
"twTitle": "",
"twDescription": "",
"twImgId": "",
"socialDescription": "In 2018, researcher Eva Galperin made a discovery about a colleague. He had been sexually abusing women for decades, and threatening to expose their private information using “stalkerware” — hidden applications that allow people to spy on another person’s private life through their mobile device. This set Eva on a new path. She went on to found the Coalition Against Stalkerware, a network of researchers and advocacy groups working to limit the spread of stalkerware and support survivors of tech-enabled abuse. Eva joins Morgan to talk about how her background in cybersecurity allowed her to help countless survivors of stalkerware abuse, and how activists and researchers are beginning to turn the tide against a sprawling, largely hidden industry.",
"schema": {
"@context": "https://schema.org",
"@type": "NewsArticle",
"headline": "Somebody’s Watching Me: The Crackdown on Stalkerware",
"datePublished": "2026-04-29T03:00:53-07:00",
"dateModified": "2026-04-29T01:36:17-07:00",
"image": "https://cdn.kqed.org/wp-content/uploads/2020/02/KQED-OG-Image@1x.png",
"isAccessibleForFree": "True",
"publisher": {
"@type": "NewsMediaOrganization",
"@id": "https://www.kqed.org/#organization",
"name": "KQED",
"logo": "https://cdn.kqed.org/wp-content/uploads/2020/02/KQED-OG-Image@1x.png",
"url": "https://www.kqed.org",
"sameAs": [
"https://www.facebook.com/KQED",
"https://twitter.com/KQED",
"https://www.instagram.com/kqed/",
"https://www.tiktok.com/@kqedofficial",
"https://www.linkedin.com/company/kqed",
"https://www.youtube.com/channel/UCeC0IOo7i1P_61zVUWbJ4nw"
]
}
}
},
"primaryCategory": {
"termId": 33520,
"slug": "podcast",
"name": "Podcast"
},
"source": "Close All Tabs",
"sourceUrl": "https://www.kqed.org/podcasts/closealltabs",
"audioUrl": "https://traffic.megaphone.fm/KQINC4327771430.mp3",
"sticky": false,
"templateType": "standard",
"featuredImageType": "standard",
"excludeFromSiteSearch": "Include",
"articleAge": "0",
"path": "/news/12081721/somebodys-watching-me-the-crackdown-on-stalkerware",
"audioTrackLength": null,
"parsedContent": [
{
"type": "contentString",
"content": "\u003cdiv class=\"post-body\">\u003cp>\u003ca href=\"#episode-transcript\">\u003ci>View the full episode transcript.\u003c/i>\u003c/a>\u003c/p>\n\u003cp>\u003cspan style=\"font-weight: 400\">In 2018, researcher Eva Galperin made a discovery about a colleague. He had been sexually abusing women for decades, and threatening to expose their private information using “stalkerware” — hidden applications that allow people to spy on another person’s private life through their mobile device. This set Eva on a new path. She went on to found the Coalition Against Stalkerware, a network of researchers and advocacy groups working to limit the spread of stalkerware and support survivors of tech-enabled abuse. \u003c/span>\u003c/p>\n\u003cp>\u003cspan style=\"font-weight: 400\">Eva joins Morgan to talk about how her background in cybersecurity allowed her to help countless survivors of stalkerware abuse, and how activists and researchers are beginning to turn the tide against a sprawling, largely hidden industry. 
\u003c/span>\u003c/p>\n\u003cp>\u003c!-- iframe plugin v.4.3 wordpress.org/plugins/iframe/ -->\u003cbr>\n\u003ciframe loading=\"lazy\" frameborder=\"0\" height=\"200\" scrolling=\"no\" src=\"https://playlist.megaphone.fm?e=KQINC4327771430\" width=\"100%\" class=\"iframe-class\">\u003c/iframe>\u003c/p>\n\u003cp>\u003cstrong>Guest:\u003c/strong>\u003c/p>\n\u003cul>\n\u003cli>\u003ca href=\"https://www.eff.org/about/staff/eva-galperin\">\u003cspan style=\"font-weight: 400\">Eva Galperin\u003c/span>\u003c/a>\u003cspan style=\"font-weight: 400\">, director of cybersecurity at the Electronic Frontier Foundation\u003c/span>\u003c/li>\n\u003c/ul>\n\u003cp>\u003cb>Further Reading/Listening:\u003c/b>\u003c/p>\n\u003cul>\n\u003cli>\u003ca href=\"https://stopstalkerware.org/\">\u003cspan style=\"font-weight: 400\">What is stalkerware?\u003c/span>\u003c/a>\u003cspan style=\"font-weight: 400\"> — \u003c/span>\u003ci>\u003cspan style=\"font-weight: 400\">Coalition Against Stalkerware \u003c/span>\u003c/i>\u003c/li>\n\u003cli>\u003ci>\u003c/i>\u003ca href=\"https://techcrunch.com/2026/02/09/hacked-leaked-exposed-why-you-should-stop-using-stalkerware-apps/\">\u003cspan style=\"font-weight: 400\">Hacked, leaked, exposed: Why you should never use stalkerware apps\u003c/span>\u003c/a>\u003cspan style=\"font-weight: 400\"> — Lorenzo Franceschi-Bicchierai, \u003c/span>\u003ci>\u003cspan style=\"font-weight: 400\">TechCrunch \u003c/span>\u003c/i>\u003c/li>\n\u003cli>\u003ci>\u003c/i>\u003ca href=\"https://www.theverge.com/2018/2/21/17035552/sexual-assault-harassment-whisper-network-reporting-failure-marquis-boire\">\u003cspan style=\"font-weight: 400\">When whisper networks let us down\u003c/span>\u003c/a>\u003cspan style=\"font-weight: 400\"> — Sarah Jeong, \u003c/span>\u003ci>\u003cspan style=\"font-weight: 400\">The Verge\u003c/span>\u003c/i>\u003c/li>\n\u003cli>\u003ci>\u003c/i>\u003ca 
href=\"https://www.vice.com/en/article/spyware-company-spyfone-terabytes-data-exposed-online-leak/\">\u003cspan style=\"font-weight: 400\">Spyware Company Leaves ‘Terabytes’ of Selfies, Text Messages, and Location Data Exposed Online\u003c/span>\u003c/a>\u003cspan style=\"font-weight: 400\"> — Lorenzo Franceschi-Bicchierai, \u003c/span>\u003ci>\u003cspan style=\"font-weight: 400\">Vice \u003c/span>\u003c/i>\u003c/li>\n\u003cli>\u003ci>\u003c/i>\u003ca href=\"https://techcrunch.com/2021/10/19/stalkerware-security-phone-data-thousands/\">\u003cspan style=\"font-weight: 400\">A massive ‘stalkerware’ leak puts the phone data of thousands at risk \u003c/span>\u003c/a>\u003cspan style=\"font-weight: 400\"> — Zack Whittaker, \u003c/span>\u003ci>\u003cspan style=\"font-weight: 400\">TechCrunch \u003c/span>\u003c/i>\u003c/li>\n\u003cli>\u003ci>\u003c/i>\u003ca href=\"https://techcrunch.com/2022/12/17/support-king-ftc-spytrac/\">\u003cspan style=\"font-weight: 400\">Support King, banned by FTC, linked to new phone spying operation\u003c/span>\u003c/a>\u003cspan style=\"font-weight: 400\"> — Zack Whittaker, \u003c/span>\u003ci>\u003cspan style=\"font-weight: 400\">TechCrunch \u003c/span>\u003c/i>\u003c/li>\n\u003cli>\u003ci>\u003c/i>\u003ca href=\"https://www.eff.org/deeplinks/2025/11/eff-teams-av-comparatives-test-android-stalkerware-detection-major-antivirus-apps\">EFF Teams Up With AV Comparatives to Test Android Stalkerware Detection by Major Antivirus Apps \u003c/a>— Eva Galperin, \u003ci>Electronic Frontier Foundation\u003c/i>\u003c/li>\n\u003c/ul>\n\u003cp>Want to give us feedback on the show? 
Shoot us an email at \u003ca href=\"mailto:CloseAllTabs@KQED.org\">CloseAllTabs@KQED.org\u003c/a>\u003c/p>\n\u003cp>\u003cspan style=\"font-weight: 400\">Follow us on\u003c/span>\u003ca href=\"https://www.instagram.com/closealltabspod/\"> \u003cspan style=\"font-weight: 400\">Instagram\u003c/span>\u003c/a>\u003cspan style=\"font-weight: 400\"> and\u003c/span>\u003ca href=\"https://www.tiktok.com/@closealltabs\"> \u003cspan style=\"font-weight: 400\">TikTok\u003c/span>\u003c/a>\u003c/p>\n\u003cp>\u003c/p>\u003c/div>",
"attributes": {
"named": {},
"numeric": []
}
},
{
"type": "component",
"content": "",
"name": "ad",
"attributes": {
"named": {
"label": "fullwidth"
},
"numeric": [
"fullwidth"
]
}
},
{
"type": "contentString",
"content": "\u003cdiv class=\"post-body\">\u003c/div>",
"attributes": {
"named": {},
"numeric": []
}
},
{
"type": "contentString",
"content": "\u003cdiv class=\"post-content post-body\">\u003ch2 id=\"episode-transcript\">Episode Transcript\u003c/h2>\n\u003cp>\u003ci>\u003cspan style=\"font-weight: 400\">A full transcript will be available 1–2 workdays after the episode’s publication.\u003c/span>\u003c/i>\u003c/p>\n\u003cp>\u003c/p>\n\u003c/div>"
}
],
"link": "/news/12081721/somebodys-watching-me-the-crackdown-on-stalkerware",
"authors": [
"11944",
"11943",
"11869",
"11832"
],
"programs": [
"news_35082"
],
"categories": [
"news_33520"
],
"tags": [
"news_22973",
"news_17619",
"news_22844",
"news_3137",
"news_34646",
"news_2414",
"news_2125",
"news_1859",
"news_1631"
],
"featImg": "news_12081722",
"label": "source_news_12081721"
},
"news_12081603": {
"type": "posts",
"id": "news_12081603",
"meta": {
"index": "posts_1716263798",
"site": "news",
"id": "12081603",
"score": null,
"sort": [
1777421165000
]
},
"guestAuthors": [],
"slug": "elon-musk-takes-aim-at-openai-as-trial-begins-its-not-ok-to-steal-a-charity",
"title": "Elon Musk Takes Aim at OpenAI as Trial Begins: ‘It’s Not OK to Steal a Charity’",
"publishDate": 1777421165,
"format": "standard",
"headTitle": "Elon Musk Takes Aim at OpenAI as Trial Begins: ‘It’s Not OK to Steal a Charity’ | KQED",
"labelTerm": {
"site": "news"
},
"content": "\u003cp>In a federal courtroom in Oakland on Tuesday, attorneys for tech elites Sam Altman and Elon Musk set the stage for a \u003ca href=\"https://www.kqed.org/news/12081290/how-to-unscramble-an-omelet-in-silicon-valley-the-musk-v-altman-trial-that-will-try\">landmark case to determine whether OpenAI\u003c/a>, one of the most powerful artificial intelligence companies in the world, was founded on a lie.\u003c/p>\n\u003cp>At issue is whether the company’s stated mission — to lead AI development to benefit the common good — was authentic or a deceptive pitch designed to attract talent and investment. \u003ca href=\"https://www.kqed.org/forum/2010101912956/its-elon-musks-world-were-just-living-in-it\">Musk\u003c/a> alleges that co-founders Altman and Greg Brockman, who remains Altman’s second-in-command, participated in a “long con” to enrich themselves at his expense, after the three co-founded OpenAI as a nonprofit in 2015.\u003c/p>\n\u003cp>“They’re going to make this lawsuit very complicated, but it’s very simple,” Musk said of OpenAI on the stand on Tuesday afternoon. “It’s not OK to steal a charity.”\u003c/p>\n\u003cp>[ad fullwidth]\u003c/p>\n\u003cp>He departed the company after a falling out and \u003ca href=\"https://www.courtlistener.com/docket/69013420/musk-v-altman/\">sued the company\u003c/a> in 2024, alleging that OpenAI had breached charitable trust by restructuring as a for-profit company, now valued at more than $800 billion.\u003c/p>\n\u003cp>But Altman’s attorneys called the Tesla CEO’s behavior “a tale of two Musks,” shifting from pushing for OpenAI to become a for-profit company under his control, to caring about its nonprofit status only after launching competitor xAI in 2023. They argue OpenAI’s decision to adopt a for-profit structure was integral to its survival.\u003c/p>\n\u003cp>“We’re here because Mr. Musk didn’t get his way,” William Savitt, Altman’s lead attorney, said Tuesday. 
“And because he’s a competitor, he’ll do anything he can to attack OpenAI.”\u003c/p>\n\u003cfigure id=\"attachment_12081686\" class=\"wp-caption aligncenter\" style=\"max-width: 2000px\">\u003cimg loading=\"lazy\" decoding=\"async\" class=\"size-full wp-image-12081686\" src=\"https://cdn.kqed.org/wp-content/uploads/sites/10/2026/04/260428-MUSK-ALTMAN-VB-04-KQED.jpg\" alt=\"\" width=\"2000\" height=\"1125\" srcset=\"https://cdn.kqed.org/wp-content/uploads/sites/10/2026/04/260428-MUSK-ALTMAN-VB-04-KQED.jpg 2000w, https://cdn.kqed.org/wp-content/uploads/sites/10/2026/04/260428-MUSK-ALTMAN-VB-04-KQED-160x90.jpg 160w, https://cdn.kqed.org/wp-content/uploads/sites/10/2026/04/260428-MUSK-ALTMAN-VB-04-KQED-1536x864.jpg 1536w, https://cdn.kqed.org/wp-content/uploads/sites/10/2026/04/260428-MUSK-ALTMAN-VB-04-KQED-1200x675.jpg 1200w\" sizes=\"auto, (max-width: 2000px) 100vw, 2000px\">\u003cfigcaption class=\"wp-caption-text\">Steve Molo, Elon Musk’s attorney, presents opening statements in the trial in which Elon Musk (center-right) claims that Sam Altman (right) and OpenAI abandoned their founding promise to develop AI for the benefit of humanity, rather than solely for profit, in Oakland on April 28, 2026. 
\u003ccite>(Vicki Behringer for KQED)\u003c/cite>\u003c/figcaption>\u003c/figure>\n\u003cp>Steven Molo, Musk’s counsel, told the jury that when Musk, Altman and Brockman set out to found an AI nonprofit, their goals were to develop the technology safely and for the \u003ca href=\"https://www.kqed.org/news/12034916/about-benefiting-humanity-calls-grow-for-openai-to-make-good-on-its-promises\">benefit of humanity\u003c/a>.\u003c/p>\n\u003cp>“It wasn’t a technology to get rich,” he said.\u003c/p>\n\u003cp>After operating as a strict nonprofit for years, OpenAI added a for-profit arm in 2019, which executives said was necessary to obtain the funding needed to develop artificial general intelligence — a more advanced AI technology that surpasses human intelligence, according to court filings.\u003c/p>\n\u003cp>In early conversations about how the for-profit entity would work, Molo said, the structure was likened to a museum gift shop whose revenue funds the institution’s galleries and operations. Brockman and Altman reassured Musk that they were still committed to the nonprofit structure, he said.\u003c/p>\n\u003cp>But behind the scenes, Molo alleges that the other co-founders had more lucrative desires.[aside postID=news_12081290 hero='https://cdn.kqed.org/wp-content/uploads/sites/10/2026/04/260422-ALTMANMUSK-MD-01-KQED.jpg']In court filings, he cited a journal in which Brockman wrote that “it would be nice to be making the billions … we’ve been thinking that maybe we should just flip to a for-profit. making the money for us sounds great and all.”\u003c/p>\n\u003cp>Brockman also wrote that he and another top OpenAI executive, Ilya Sutskever, “cannot say that we are committed to the non-profit. don’t wanna say that we’re committed. 
If three months later we’re doing B-Corp [a certification for for-profit corporations with social and environmental missions], then it was a lie.”\u003c/p>\n\u003cp>Years later, after Musk had departed OpenAI, the company was “no longer operating for the good of humanity,” Molo said.\u003c/p>\n\u003cp>“The museum store sold the Picassos,” he said.\u003c/p>\n\u003cp>Musk’s lawsuit claims OpenAI breached charitable trust and alleges unjust enrichment, which means that one party unfairly benefits at the expense of another. He also accuses Microsoft, which is the company’s largest financial backer and until this week held the exclusive rights to license and sell its technology, of aiding and abetting OpenAI’s breach of charitable trust.\u003c/p>\n\u003cp>OpenAI’s defense, meanwhile, alleges that Musk’s suit is less motivated by a desire to do good than it is by vengeance for his former colleagues, whose company is now eyeing an initial public offering valued at up to $1 trillion.\u003c/p>\n\u003cp>“Musk sat on his claims for years,” Savitt said. “He knew everything that was happening when it was happening. 
My clients had the nerve to go out and succeed without him.”\u003c/p>\n\u003cp>He also pointed out that Musk launched xAI a year before bringing the lawsuit, which would make OpenAI his competitor.\u003c/p>\n\u003cfigure id=\"attachment_12081681\" class=\"wp-caption aligncenter\" style=\"max-width: 2000px\">\u003cimg loading=\"lazy\" decoding=\"async\" class=\"size-full wp-image-12081681\" src=\"https://cdn.kqed.org/wp-content/uploads/sites/10/2026/04/260428-MUSK-ALTMAN-VB-03-KQED-1.jpg\" alt=\"\" width=\"2000\" height=\"1125\" srcset=\"https://cdn.kqed.org/wp-content/uploads/sites/10/2026/04/260428-MUSK-ALTMAN-VB-03-KQED-1.jpg 2000w, https://cdn.kqed.org/wp-content/uploads/sites/10/2026/04/260428-MUSK-ALTMAN-VB-03-KQED-1-160x90.jpg 160w, https://cdn.kqed.org/wp-content/uploads/sites/10/2026/04/260428-MUSK-ALTMAN-VB-03-KQED-1-1536x864.jpg 1536w, https://cdn.kqed.org/wp-content/uploads/sites/10/2026/04/260428-MUSK-ALTMAN-VB-03-KQED-1-1200x675.jpg 1200w\" sizes=\"auto, (max-width: 2000px) 100vw, 2000px\">\u003cfigcaption class=\"wp-caption-text\">Representing Microsoft, Russell Coan (left) speaks as Elon Musk watches in the trial in which Elon Musk claims that Sam Altman and OpenAI abandoned their founding promise to develop AI for the benefit of humanity, rather than solely for profit, in Oakland on April 28, 2026. \u003ccite>(Vicki Behringer for KQED)\u003c/cite>\u003c/figcaption>\u003c/figure>\n\u003cp>Savitt pointed to moments early in OpenAI’s development, when Musk suggested that it would be “probably better” for the company to operate as a “standard C corp[oration] with a parallel nonprofit.” He initially promised to cover the balance of the funding it needed, but reneged when he didn’t get to control the company, Savitt told the jury.\u003c/p>\n\u003cp>Musk was in the middle of the conversations about pivoting from a nonprofit, Savitt said. 
As early as the summer of 2017, he insisted on holding a majority equity stake in any for-profit entity, as well as controlling its board of directors and serving as CEO, according to OpenAI’s court filings.\u003c/p>\n\u003cp>In the fall of that year, after Brockman and Sutskever emailed Musk with concerns about the for-profit structure he proposed, the discussions collapsed, OpenAI alleges. After that, Musk stopped making significant quarterly funding contributions, and he left the company less than six months later.\u003c/p>\n\u003cp>Around that time, Brockman and Altman moved to pursue a for-profit arm — a decision their attorneys say they told Musk about prior to his departure from the board.[aside postID=news_12079896 hero='https://cdn.kqed.org/wp-content/uploads/sites/10/2026/04/Daniel-Moreno-Gama-AP.jpg']Savitt said in court that Musk had given the company less than 4% of the funding he’d promised. While OpenAI had gotten contributions from other donors, he said, those “kept the lights on, but it wasn’t nearly enough to stay on the cutting edge.”\u003c/p>\n\u003cp>“They needed to get the money from somewhere, or else the project collapsed,” he said, alleging that donors weren’t willing to make the billion-dollar contributions that OpenAI needed without an expectation of return.\u003c/p>\n\u003cp>Since OpenAI established its first for-profit subsidiary, which capped investor returns at 100 times their investment, its business has exploded. It’s now a public benefit corporation, required to consider its mission statement but not necessarily to prioritize it.\u003c/p>\n\u003cp>Over the years, its mission statement has been changed several times. 
In 2023, according to the nonprofit parent organization’s \u003ca href=\"https://cdn.theconversation.com/static_files/files/4099/2023-IRS990-OpenAI.pdf?1770819990\">IRS disclosure form\u003c/a>, it sought to build AI that “safely benefits humanity, unconstrained by a need to generate financial return.” But last year, \u003ca href=\"https://app.candid.org/profile/9571629/openai-81-0861541?activeTab=7\">that same form\u003c/a> included a shorter mission statement — one that removed the word “safely” and any mention of finances, Tufts University business professor Alnoor Ebrahim \u003ca href=\"https://theconversation.com/openai-has-deleted-the-word-safely-from-its-mission-and-its-new-structure-is-a-test-for-whether-ai-serves-society-or-shareholders-274467\">wrote in \u003cem>The Conversation\u003c/em>\u003c/a>, an academic news outlet.\u003c/p>\n\u003cp>Former OpenAI employees have left and started a competitor, Anthropic, citing concerns over safety and the company’s direction. In 2023, OpenAI executives and board members, including Sutskever, staged a coup to briefly oust Altman as CEO. They said there’d been a breakdown in trust between him and the board, and that Altman engaged in a pattern of deception and wasn’t “consistently candid in his communications.”\u003c/p>\n\u003cp>Whether Altman’s and OpenAI’s pitch to develop their technology for the benefit of the world is an example of that deception is part of what jurors will aim to root out in the current trial.\u003c/p>\n\u003cp>“I didn’t want to pave the road to hell with good intentions,” Musk said on the stand on Tuesday afternoon. “If you have somebody who’s not trustworthy in charge of AI, I think that’s very dangerous for the whole world.”\u003c/p>\n\u003cp>[ad floatright]\u003c/p>\n",
"blocks": [],
"excerpt": "In a federal courtroom in Oakland, attorneys for tech elites Sam Altman and Elon Musk painted very different pictures of the early years of OpenAI and its mission to benefit the common good.",
"status": "publish",
"parent": 0,
"modified": 1777482966,
"stats": {
"hasAudio": false,
"hasVideo": false,
"hasChartOrMap": false,
"iframeSrcs": [],
"hasGoogleForm": false,
"hasGallery": false,
"hasHearkenModule": false,
"hasPolis": false,
"paragraphCount": 30,
"wordCount": 1473
},
"headData": {
"title": "Elon Musk Takes Aim at OpenAI as Trial Begins: ‘It’s Not OK to Steal a Charity’ | KQED",
"description": "In a federal courtroom in Oakland, attorneys for tech elites Sam Altman and Elon Musk painted very different pictures of the early years of OpenAI and its mission to benefit the common good.",
"ogTitle": "",
"ogDescription": "",
"ogImgId": "",
"twTitle": "",
"twDescription": "",
"twImgId": "",
"schema": {
"@context": "https://schema.org",
"@type": "NewsArticle",
"headline": "Elon Musk Takes Aim at OpenAI as Trial Begins: ‘It’s Not OK to Steal a Charity’",
"datePublished": "2026-04-28T17:06:05-07:00",
"dateModified": "2026-04-29T10:16:06-07:00",
"image": "https://cdn.kqed.org/wp-content/uploads/2020/02/KQED-OG-Image@1x.png",
"isAccessibleForFree": "True",
"publisher": {
"@type": "NewsMediaOrganization",
"@id": "https://www.kqed.org/#organization",
"name": "KQED",
"logo": "https://cdn.kqed.org/wp-content/uploads/2020/02/KQED-OG-Image@1x.png",
"url": "https://www.kqed.org",
"sameAs": [
"https://www.facebook.com/KQED",
"https://twitter.com/KQED",
"https://www.instagram.com/kqed/",
"https://www.tiktok.com/@kqedofficial",
"https://www.linkedin.com/company/kqed",
"https://www.youtube.com/channel/UCeC0IOo7i1P_61zVUWbJ4nw"
]
}
}
},
"primaryCategory": {
"termId": 248,
"slug": "technology",
"name": "Technology"
},
"audioUrl": "https://traffic.omny.fm/d/clips/0af137ef-751e-4b19-a055-aaef00d2d578/87fdd794-f90e-4280-920f-ab89016e8062/3ac84f6e-ca1f-4213-bd14-b43a01848097/audio.mp3",
"sticky": false,
"nprStoryId": "kqed-12081603",
"templateType": "standard",
"featuredImageType": "standard",
"excludeFromSiteSearch": "Include",
"articleAge": "0",
"path": "/news/12081603/elon-musk-takes-aim-at-openai-as-trial-begins-its-not-ok-to-steal-a-charity",
"audioTrackLength": null,
"parsedContent": [
{
"type": "contentString",
"content": "\u003cdiv class=\"post-body\">\u003cp>\u003cp>In a federal courtroom in Oakland on Tuesday, attorneys for tech elites Sam Altman and Elon Musk set the stage for a \u003ca href=\"https://www.kqed.org/news/12081290/how-to-unscramble-an-omelet-in-silicon-valley-the-musk-v-altman-trial-that-will-try\">landmark case to determine whether OpenAI\u003c/a>, one of the most powerful artificial intelligence companies in the world, was founded on a lie.\u003c/p>\n\u003cp>At issue is whether the company’s stated mission — to lead AI development to benefit the common good — was authentic or a deceptive pitch designed to attract talent and investment. \u003ca href=\"https://www.kqed.org/forum/2010101912956/its-elon-musks-world-were-just-living-in-it\">Musk\u003c/a> alleges that co-founders Altman and Greg Brockman, who remains Altman’s second-in-command, participated in a “long con” to enrich themselves at his expense, after the three co-founded OpenAI as a nonprofit in 2015.\u003c/p>\n\u003cp>“They’re going to make this lawsuit very complicated, but it’s very simple,” Musk said of OpenAI on the stand on Tuesday afternoon. “It’s not OK to steal a charity.”\u003c/p>\n\u003cp>\u003c/p>\u003c/div>",
"attributes": {
"named": {},
"numeric": []
}
},
{
"type": "component",
"content": "",
"name": "ad",
"attributes": {
"named": {
"label": "fullwidth"
},
"numeric": [
"fullwidth"
]
}
},
{
"type": "contentString",
"content": "\u003cdiv class=\"post-body\">\u003cp>\u003c/p>\n\u003cp>He departed the company after a falling out and \u003ca href=\"https://www.courtlistener.com/docket/69013420/musk-v-altman/\">sued the company\u003c/a> in 2024, alleging that OpenAI had breached charitable trust by restructuring as a for-profit company, now valued at more than $800 billion.\u003c/p>\n\u003cp>But Altman’s attorneys called the Tesla CEO’s behavior “a tale of two Musks,” shifting from pushing for OpenAI to become a for-profit company under his control, to caring about its nonprofit status only after launching competitor xAI in 2023. They argue OpenAI’s decision to adopt a for-profit structure was integral to its survival.\u003c/p>\n\u003cp>“We’re here because Mr. Musk didn’t get his way,” William Savitt, Altman’s lead attorney, said Tuesday. “And because he’s a competitor, he’ll do anything he can to attack OpenAI.”\u003c/p>\n\u003cfigure id=\"attachment_12081686\" class=\"wp-caption aligncenter\" style=\"max-width: 2000px\">\u003cimg loading=\"lazy\" decoding=\"async\" class=\"size-full wp-image-12081686\" src=\"https://cdn.kqed.org/wp-content/uploads/sites/10/2026/04/260428-MUSK-ALTMAN-VB-04-KQED.jpg\" alt=\"\" width=\"2000\" height=\"1125\" srcset=\"https://cdn.kqed.org/wp-content/uploads/sites/10/2026/04/260428-MUSK-ALTMAN-VB-04-KQED.jpg 2000w, https://cdn.kqed.org/wp-content/uploads/sites/10/2026/04/260428-MUSK-ALTMAN-VB-04-KQED-160x90.jpg 160w, https://cdn.kqed.org/wp-content/uploads/sites/10/2026/04/260428-MUSK-ALTMAN-VB-04-KQED-1536x864.jpg 1536w, https://cdn.kqed.org/wp-content/uploads/sites/10/2026/04/260428-MUSK-ALTMAN-VB-04-KQED-1200x675.jpg 1200w\" sizes=\"auto, (max-width: 2000px) 100vw, 2000px\">\u003cfigcaption class=\"wp-caption-text\">Steve Molo, Elon Musk’s attorney, presents opening statements in the trial in which Elon Musk (center-right) claims that Sam Altman (right) and OpenAI abandoned their founding promise to develop AI for the benefit of humanity, rather 
than solely for profit, in Oakland on April 28, 2026. \u003ccite>(Vicki Behringer for KQED)\u003c/cite>\u003c/figcaption>\u003c/figure>\n\u003cp>Steven Molo, Musk’s counsel, told the jury that when Musk, Altman and Brockman set out to found an AI nonprofit, their goals were to develop the technology safely and for the \u003ca href=\"https://www.kqed.org/news/12034916/about-benefiting-humanity-calls-grow-for-openai-to-make-good-on-its-promises\">benefit of humanity\u003c/a>.\u003c/p>\n\u003cp>“It wasn’t a technology to get rich,” he said.\u003c/p>\n\u003cp>After operating as a strict nonprofit for years, OpenAI added a for-profit arm in 2019, which executives said was necessary to obtain the funding needed to develop artificial general intelligence — a more advanced AI technology that surpasses human intelligence, according to court filings.\u003c/p>\n\u003cp>In early conversations about how the for-profit entity would work, Molo said, the structure was likened to a museum gift shop whose revenue funds the institution’s galleries and operations. Brockman and Altman reassured Musk that they were still committed to the nonprofit structure, he said.\u003c/p>\n\u003cp>But behind the scenes, Molo alleges that the other co-founders had more lucrative desires.\u003c/p>\u003c/div>",
"attributes": {
"named": {},
"numeric": []
}
},
{
"type": "component",
"content": "",
"name": "aside",
"attributes": {
"named": {
"postid": "news_12081290",
"hero": "https://cdn.kqed.org/wp-content/uploads/sites/10/2026/04/260422-ALTMANMUSK-MD-01-KQED.jpg",
"label": ""
},
"numeric": []
}
},
{
"type": "contentString",
"content": "\u003cdiv class=\"post-body\">\u003cp>In court filings, he cited a journal in which Brockman wrote that “it would be nice to be making the billions … we’ve been thinking that maybe we should just flip to a for-profit. making the money for us sounds great and all.”\u003c/p>\n\u003cp>Brockman also wrote that he and another top OpenAI executive, Ilya Sutskever, “cannot say that we are committed to the non-profit. don’t wanna say that we’re committed. If three months later we’re doing B-Corp [a certification for for-profit corporations with social and environmental missions], then it was a lie.”\u003c/p>\n\u003cp>Years later, after Musk had departed OpenAI, the company was “no longer operating for the good of humanity,” Molo said.\u003c/p>\n\u003cp>“The museum store sold the Picassos,” he said.\u003c/p>\n\u003cp>Musk’s lawsuit claims OpenAI breached charitable trust and alleges unjust enrichment, which means that one party unfairly benefits at the expense of another. He also accuses Microsoft, which is the company’s largest financial backer and until this week held the exclusive rights to license and sell its technology, of aiding and abetting OpenAI’s breach of charitable trust.\u003c/p>\n\u003cp>OpenAI’s defense, meanwhile, alleges that Musk’s suit is less motivated by a desire to do good than it is by vengeance for his former colleagues, whose company is now eyeing an initial public offering valued at up to $1 trillion.\u003c/p>\n\u003cp>“Musk sat on his claims for years,” Savitt said. “He knew everything that was happening when it was happening. 
My clients had the nerve to go out and succeed without him.”\u003c/p>\n\u003cp>He also pointed out that Musk launched xAI a year before bringing the lawsuit, which would make OpenAI his competitor.\u003c/p>\n\u003cfigure id=\"attachment_12081681\" class=\"wp-caption aligncenter\" style=\"max-width: 2000px\">\u003cimg loading=\"lazy\" decoding=\"async\" class=\"size-full wp-image-12081681\" src=\"https://cdn.kqed.org/wp-content/uploads/sites/10/2026/04/260428-MUSK-ALTMAN-VB-03-KQED-1.jpg\" alt=\"\" width=\"2000\" height=\"1125\" srcset=\"https://cdn.kqed.org/wp-content/uploads/sites/10/2026/04/260428-MUSK-ALTMAN-VB-03-KQED-1.jpg 2000w, https://cdn.kqed.org/wp-content/uploads/sites/10/2026/04/260428-MUSK-ALTMAN-VB-03-KQED-1-160x90.jpg 160w, https://cdn.kqed.org/wp-content/uploads/sites/10/2026/04/260428-MUSK-ALTMAN-VB-03-KQED-1-1536x864.jpg 1536w, https://cdn.kqed.org/wp-content/uploads/sites/10/2026/04/260428-MUSK-ALTMAN-VB-03-KQED-1-1200x675.jpg 1200w\" sizes=\"auto, (max-width: 2000px) 100vw, 2000px\">\u003cfigcaption class=\"wp-caption-text\">Representing Microsoft, Russell Coan (left) speaks as Elon Musk watches in the trial in which Elon Musk claims that Sam Altman and OpenAI abandoned their founding promise to develop AI for the benefit of humanity, rather than solely for profit, in Oakland on April 28, 2026. \u003ccite>(Vicki Behringer for KQED)\u003c/cite>\u003c/figcaption>\u003c/figure>\n\u003cp>Savitt pointed to moments early in OpenAI’s development, when Musk suggested that it would be “probably better” for the company to operate as a “standard C corp[oration] with a parallel nonprofit.” He initially promised to cover the balance of the funding it needed, but reneged when he didn’t get to control the company, Savitt told the jury.\u003c/p>\n\u003cp>Musk was in the middle of the conversations about pivoting from a nonprofit, Savitt said. 
As early as the summer of 2017, he insisted on holding a majority equity stake in any for-profit entity, as well as controlling its board of directors and serving as CEO, according to OpenAI’s court filings.\u003c/p>\n\u003cp>In the fall of that year, after Brockman and Sutskever emailed Musk with concerns about the for-profit structure he proposed, the discussions collapsed, OpenAI alleges. After that, Musk stopped making significant quarterly funding contributions, and he left the company less than six months later.\u003c/p>\n\u003cp>Around that time, Brockman and Altman moved to pursue a for-profit arm — a decision their attorneys say they told Musk about prior to his departure from the board.\u003c/p>\u003c/div>",
"attributes": {
"named": {},
"numeric": []
}
},
{
"type": "component",
"content": "",
"name": "aside",
"attributes": {
"named": {
"postid": "news_12079896",
"hero": "https://cdn.kqed.org/wp-content/uploads/sites/10/2026/04/Daniel-Moreno-Gama-AP.jpg",
"label": ""
},
"numeric": []
}
},
{
"type": "contentString",
"content": "\u003cdiv class=\"post-body\">\u003cp>Savitt said in court that Musk had given the company less than 4% of the funding he’d promised. While OpenAI had gotten contributions from other donors, he said, those “kept the lights on, but it wasn’t nearly enough to stay on the cutting edge.”\u003c/p>\n\u003cp>“They needed to get the money from somewhere, or else the project collapsed,” he said, alleging that donors weren’t willing to make the billion-dollar contributions that OpenAI needed without an expectation of return.\u003c/p>\n\u003cp>Since OpenAI established its first for-profit subsidiary, which capped investor returns at 100 times their investment, its business has exploded. It’s now a public benefit corporation, required to consider its mission statement but not necessarily to prioritize it.\u003c/p>\n\u003cp>Over the years, its mission statement has been changed several times. In 2023, according to the nonprofit parent organization’s \u003ca href=\"https://cdn.theconversation.com/static_files/files/4099/2023-IRS990-OpenAI.pdf?1770819990\">IRS disclosure form\u003c/a>, it sought to build AI that “safely benefits humanity, unconstrained by a need to generate financial return.” But last year, \u003ca href=\"https://app.candid.org/profile/9571629/openai-81-0861541?activeTab=7\">that same form\u003c/a> included a shorter mission statement — one that removed the word “safely” and any mention of finances, Tufts University business professor Alnoor Ebrahim \u003ca href=\"https://theconversation.com/openai-has-deleted-the-word-safely-from-its-mission-and-its-new-structure-is-a-test-for-whether-ai-serves-society-or-shareholders-274467\">wrote in \u003cem>The Conversation\u003c/em>\u003c/a>, an academic news outlet.\u003c/p>\n\u003cp>Former OpenAI employees have left and started a competitor, Anthropic, citing concerns over safety and the company’s direction. 
In 2023, OpenAI executives and board members, including Sutskever, staged a coup to briefly oust Altman as CEO. They said there’d been a breakdown in trust between him and the board, and that Altman engaged in a pattern of deception and wasn’t “consistently candid in his communications.”\u003c/p>\n\u003cp>Whether Altman’s and OpenAI’s pitch to develop their technology for the benefit of the world is an example of that deception is part of what jurors will aim to root out in the current trial.\u003c/p>\n\u003cp>“I didn’t want to pave the road to hell with good intentions,” Musk said on the stand on Tuesday afternoon. “If you have somebody who’s not trustworthy in charge of AI, I think that’s very dangerous for the whole world.”\u003c/p>\n\u003cp>\u003c/p>\u003c/div>",
"attributes": {
"named": {},
"numeric": []
}
},
{
"type": "component",
"content": "",
"name": "ad",
"attributes": {
"named": {
"label": "floatright"
},
"numeric": [
"floatright"
]
}
},
{
"type": "contentString",
"content": "\u003cdiv class=\"post-body\">\u003cp>\u003c/p>\n\u003c/div>\u003c/p>",
"attributes": {
"named": {},
"numeric": []
}
}
],
"link": "/news/12081603/elon-musk-takes-aim-at-openai-as-trial-begins-its-not-ok-to-steal-a-charity",
"authors": [
"11913",
"251"
],
"categories": [
"news_6188",
"news_28250",
"news_8",
"news_248"
],
"tags": [
"news_34755",
"news_32668",
"news_18352",
"news_3897",
"news_27626",
"news_19954",
"news_34054",
"news_33542",
"news_33543",
"news_34586",
"news_1631"
],
"featImg": "news_12081639",
"label": "news"
},
"news_12081290": {
"type": "posts",
"id": "news_12081290",
"meta": {
"index": "posts_1716263798",
"site": "news",
"id": "12081290",
"score": null,
"sort": [
1777287633000
]
},
"guestAuthors": [],
"slug": "how-to-unscramble-an-omelet-in-silicon-valley-the-musk-v-altman-trial-that-will-try",
"title": "How to Unscramble an Omelet in Silicon Valley: The Musk v. Altman Trial That Will Try",
"publishDate": 1777287633,
"format": "standard",
"headTitle": "How to Unscramble an Omelet in Silicon Valley: The Musk v. Altman Trial That Will Try | KQED",
"labelTerm": {
"site": "news"
},
"content": "\u003cp>Starting Monday in Oakland, a federal judge will consider \u003ca href=\"https://www.kqed.org/forum/2010101912956/its-elon-musks-world-were-just-living-in-it\">Elon Musk\u003c/a>’s claim that Sam Altman and OpenAI abandoned their founding promise to develop AI for the \u003ca href=\"https://www.kqed.org/news/12034916/about-benefiting-humanity-calls-grow-for-openai-to-make-good-on-its-promises\">benefit of humanity\u003c/a>, rather than solely for profit. At stake is not just $134 billion in potential damages, but whether it matters, legally speaking, that one of the most powerful AI companies in the world was built on a lie.\u003c/p>\n\u003cp>Musk and Altman co-founded OpenAI in 2015 as a nonprofit research lab, along with Greg Brockman, an AI researcher and entrepreneur, and others prominent in the field, but Musk left the company after a bitter falling out in 2018.\u003c/p>\n\u003cp>The following year, OpenAI established its first for-profit subsidiary, with investor returns capped at 100 times their investment. This structure would eventually evolve into the nearly trillion-dollar public benefit corporation OpenAI became in 2025. A public benefit corporation is essentially a for-profit company with a mission statement it’s legally required to consider, but not necessarily to prioritize.\u003c/p>\n\u003cp>[ad fullwidth]\u003c/p>\n\u003cp>This\u003ca href=\"https://www.courtlistener.com/docket/69013420/musk-v-altman/\"> lawsuit\u003c/a>, filed in 2024, originally alleged that Altman and Brockman ran a ‘long con,’ conspiring to enrich themselves at Musk’s expense.\u003c/p>\n\u003cp>On the eve of trial, in a move OpenAI called “evasive,” Musk’s lawyers voluntarily dismissed those personal fraud claims. 
What proceeds to trial today are two claims that go beyond Musk’s personal grievance: unjust enrichment and breach of charitable trust — essentially, the argument that OpenAI betrayed, not just Musk, but the public it promised to serve.\u003c/p>\n\u003cp>OpenAI argues Musk was fully aware the research lab needed to evolve beyond its nonprofit structure, because he participated in those early discussions, and even proposed folding OpenAI into Tesla. Now, OpenAI’s lawyers argue, Musk is disingenuously trying to use the courts to kneecap the most prominent rival to his own weaker and more controversial AI venture, xAI.\u003c/p>\n\u003cfigure id=\"attachment_12075430\" class=\"wp-caption aligncenter\" style=\"max-width: 2000px\">\u003cimg loading=\"lazy\" decoding=\"async\" class=\"size-full wp-image-12075430\" src=\"https://cdn.kqed.org/wp-content/uploads/sites/10/2026/03/260304-Elon-Musk-Trial-03-KQED.jpg\" alt=\"\" width=\"2000\" height=\"1333\" srcset=\"https://cdn.kqed.org/wp-content/uploads/sites/10/2026/03/260304-Elon-Musk-Trial-03-KQED.jpg 2000w, https://cdn.kqed.org/wp-content/uploads/sites/10/2026/03/260304-Elon-Musk-Trial-03-KQED-160x107.jpg 160w, https://cdn.kqed.org/wp-content/uploads/sites/10/2026/03/260304-Elon-Musk-Trial-03-KQED-1536x1024.jpg 1536w\" sizes=\"auto, (max-width: 2000px) 100vw, 2000px\">\u003cfigcaption class=\"wp-caption-text\">A courtroom sketch depicts Elon Musk on the stand on March 4, 2026. \u003ccite>(Vicki Behringer for KQED)\u003c/cite>\u003c/figcaption>\u003c/figure>\n\u003cp>“Motivated by jealousy, regret for walking away from OpenAI and a desire to derail a competing AI company, Elon has spent years harassing OpenAI through baseless lawsuits and public attacks,” the company\u003ca href=\"https://openai.com/index/openai-elon-musk/\"> posted\u003c/a> on its website, where it also offers a\u003ca href=\"https://openai.com/index/elon-musk-wanted-an-openai-for-profit/\"> timeline\u003c/a> that Musk v. 
Altman et al case watchers will find helpful as they follow what promises to be a barnburner of a trial.\u003c/p>\n\u003cp>\u003ca href=\"https://www.courtlistener.com/docket/69013420/musk-v-altman/?page=3\">Hundreds of court filings\u003c/a> provide a dishy treasure trove of private communications worthy of a telenovela, including some juicy excerpts from Brockman’s personal journal.\u003c/p>\n\u003cp>He writes about Musk, “it’d be wrong to steal the nonprofit from him. … that’d be pretty morally bankrupt. and he’s really not an idiot.”[aside postID=news_12072425 hero='https://cdn.kqed.org/wp-content/uploads/sites/10/2024/05/AP24134775174210-1020x680.jpg']Also, “Financially, what will take me to $1B?”\u003c/p>\n\u003cp>But without a doubt, it is the beef between Musk and Altman that will dominate this show. “They really do not like each other. That part is not fake,” said Charlie Bullock, a senior research fellow at the nonprofit Institute for Law and AI who advises state and federal policy makers on AI governance topics.\u003c/p>\n\u003cp>This trial promises to put on lurid public display a mini-universe of incestuous business relationships between men famous for rewriting rules rather than following them.\u003c/p>\n\u003cp>Personal spite between Musk and Altman aside, Bullock said, “We’re going to learn a lot over the course of this case and from the conclusion of this case about whether the legal system can meaningfully constrain frontier AI labs.”\u003c/p>\n\u003cp>This trial, Bullock told KQED, is “sort of the fallback option” in the absence of other checks on bad behavior in the AI space, such as federal regulation.\u003c/p>\n\u003cp>There is, for instance, a well-established law in California about nonprofits, for-profits, and how transitions between the two should be regulated. Whether and how it applies in this case is up to U.S. 
District Judge Yvonne Gonzalez Rogers in Oakland to determine over the next month.\u003c/p>\n\u003ch2>OpenAI is like nothing that’s come before\u003c/h2>\n\u003cp>Jill Horwitz, a law professor at Northwestern University and faculty director of the Lowell Milken Center for Philanthropy and Nonprofits at UCLA Law, likens OpenAI’s unique structure to “An enormous tail on a tiny dog.”\u003c/p>\n\u003cp>“The tail is the operating company, which is what everybody thinks of as being OpenAI, and the dog is the nonprofit, and it’s tiny. And it remains to be seen whether that board can be independent enough, because there’s such overlap between the nonprofit board and the for-profit board,” Horwitz said.\u003c/p>\n\u003cfigure id=\"attachment_12054564\" class=\"wp-caption aligncenter\" style=\"max-width: 2000px\">\u003cimg loading=\"lazy\" decoding=\"async\" class=\"wp-image-12054564 size-full\" src=\"https://cdn.kqed.org/wp-content/uploads/sites/10/2025/09/Sam-Altman_chatpgt.jpg\" alt=\"\" width=\"2000\" height=\"1333\" srcset=\"https://cdn.kqed.org/wp-content/uploads/sites/10/2025/09/Sam-Altman_chatpgt.jpg 2000w, https://cdn.kqed.org/wp-content/uploads/sites/10/2025/09/Sam-Altman_chatpgt-160x107.jpg 160w, https://cdn.kqed.org/wp-content/uploads/sites/10/2025/09/Sam-Altman_chatpgt-1536x1024.jpg 1536w\" sizes=\"auto, (max-width: 2000px) 100vw, 2000px\">\u003cfigcaption class=\"wp-caption-text\">Samuel Altman, CEO of OpenAI, testifies before the Senate Judiciary Subcommittee on Privacy, Technology and the Law on May 16, 2023, in Washington, D.C. \u003ccite>(Win McNamee/Getty Images)\u003c/cite>\u003c/figcaption>\u003c/figure>\n\u003cp>“It’s a weird structure. OpenAI isn’t one company. OpenAI is an interconnected group of companies. 
But it all is supposed to be advancing the nonprofit purpose,” Horwitz told KQED.\u003c/p>\n\u003cp>In 2018, even as OpenAI was privately contemplating the for-profit restructuring, it voluntarily adopted a new charter that restated and even strengthened its commitment to the public mission articulated at its founding.\u003c/p>\n\u003cp>In part, this had to do with the pressure Altman and OpenAI felt to attract top AI researchers, many of whom are concerned about the ethics of unleashing world-changing software on the rest of us. In 2024, 13 current and former OpenAI and Google DeepMind employees took the extraordinary step of publishing an \u003ca href=\"https://righttowarn.ai\">open letter\u003c/a> titled “Right to Warn,” calling out their own industry, and asking for protection if they warned the public.[aside postID=news_12079267 hero='https://cdn.kqed.org/wp-content/uploads/sites/10/2026/04/Hegseth-Side-by-Side-c.jpg']“We are hopeful that these risks can be adequately mitigated with sufficient guidance from the scientific community, policymakers, and the public. However, AI companies have strong financial incentives to avoid effective oversight, and we do not believe bespoke structures of corporate governance are sufficient to change this.”\u003c/p>\n\u003cp>To this day, it remains unclear whether Altman’s talk about benefiting humanity was anything more than a savvy sales pitch designed to attract top AI talent and allay the concerns of \u003ca href=\"https://www.kqed.org/news/11976097/california-lawmakers-take-on-ai-regulation-with-a-host-of-bills\">federal regulators\u003c/a>. 
This is one of the key questions trial watchers will be most keen to see answered.\u003c/p>\n\u003cp>“It’s quite typical for scientific research organizations to do all the hard work of the research before their IP is sold to a for-profit company for practical purposes,” said Rose Chan Loui, founding executive director of the Lowell Milken Center for Philanthropy and Nonprofits at UCLA Law.\u003c/p>\n\u003cp>What makes OpenAI unusual, Chan Loui said, is how explicitly and repeatedly the AI developer bound itself to promising its AI would be developed safely and for the benefit of all of humanity. “When they opened up to investment and formed the subsidiary, they recommitted to that purpose. They tied themselves even more tightly.”\u003c/p>\n\u003cp>Anthropic, founded by former OpenAI employees who left over concerns about the company’s direction, has cultivated a reputation as the more safety-conscious, ethically serious player in the AI race, the light gray hat to OpenAI’s dark gray one. Anthropic chose to incorporate as a public benefit corporation from the beginning, rather than a nonprofit, because a public benefit corporation has far more legal flexibility. “Anthropic may be behaving in a way that the public thinks is more charitable, but its legal duties to do so are a lot lower than OpenAI’s,” Horwitz said.\u003c/p>\n\u003ch2>But is Musk the right party to bring this suit?\u003c/h2>\n\u003cp>For legal eagles following this case, it’s curious that Musk is the plaintiff, rather than California’s attorney general, who is the primary legal guardian of charitable assets in the state, where most of OpenAI’s assets are located. 
But in 2025, Attorney General Rob Bonta negotiated a binding \u003ca href=\"https://oag.ca.gov/system/files/attachments/press-docs/Final%20Executed%20MOU%20Between%20OpenAI%20and%20California%20AG%20re%20Notice%20of%20Conditions%20of%20Non-Objection%20%2810.27.2025%29%20%28Signed%20by%20OpenAI%29%20%28Signed%20by%20CA%20DOJ%29.pdf\">memorandum of understanding\u003c/a> with OpenAI. The AG in Delaware, where OpenAI is incorporated, issued a parallel statement of non-objection.\u003c/p>\n\u003cp>A coalition of more than 30 California foundations and nonprofit organizations, including the San Francisco Foundation and TechEquity, \u003ca href=\"https://www.sff.org/Offsite-Media/Charitable-coalition-letter-on-OpenAI-conversion-1-29-25.pdf\">urged Bonta\u003c/a> to take immediate legal action to protect OpenAI’s charitable assets, arguing his office had both the authority and the responsibility to do so.\u003c/p>\n\u003cfigure id=\"attachment_12063671\" class=\"wp-caption aligncenter\" style=\"max-width: 2000px\">\u003cimg loading=\"lazy\" decoding=\"async\" class=\"size-full wp-image-12063671\" src=\"https://cdn.kqed.org/wp-content/uploads/sites/10/2025/11/RobBontaAP.jpg\" alt=\"\" width=\"2000\" height=\"1333\" srcset=\"https://cdn.kqed.org/wp-content/uploads/sites/10/2025/11/RobBontaAP.jpg 2000w, https://cdn.kqed.org/wp-content/uploads/sites/10/2025/11/RobBontaAP-160x107.jpg 160w, https://cdn.kqed.org/wp-content/uploads/sites/10/2025/11/RobBontaAP-1536x1024.jpg 1536w\" sizes=\"auto, (max-width: 2000px) 100vw, 2000px\">\u003cfigcaption class=\"wp-caption-text\">California Attorney General Rob Bonta speaks to reporters as Arizona Attorney General Kris Mayes, left, and Oregon Attorney General Dan Rayfield, right, listen outside the Supreme Court on Wednesday, Nov. 5, 2025, in Washington, D.C. 
\u003ccite>(Mark Schiefelbein/AP Photo)\u003c/cite>\u003c/figcaption>\u003c/figure>\n\u003cp>\u003ca href=\"https://www.kqed.org/news/12034916/about-benefiting-humanity-calls-grow-for-openai-to-make-good-on-its-promises\">More than 50 organizations\u003c/a> also petitioned Bonta to halt OpenAI’s for-profit conversion until he calculated the full market value of OpenAI’s nonprofit assets, estimated at the time at up to $300 billion, and directed OpenAI to transfer that value to independent nonprofit entities.\u003c/p>\n\u003cp>“It’s not too late for the Attorney General to revisit his agreement with OpenAI,” wrote Catherine Bracy, founder and CEO of TechEquity, an Oakland-based tech accountability organization. “The evidence this trial unearths, especially how OpenAI violated its original charitable mission in pursuit of profit, will likely leave him no choice.”\u003c/p>\n\u003cp>Chan Loui is among those scratching her head over a basic question: why does Musk get to bring this case at all? “He’s a competitor,” she said.\u003c/p>\n\u003cp>A personal fraud claim, that Altman lied to him to get his money, might have given Musk the clearest standing as an injured party. But Musk voluntarily dismissed those claims late last week. What remains rests almost entirely on a public interest argument, one that California’s attorney general, not a billionaire with a rival AI company of his own, would typically make. [aside postID=news_12079896 hero='https://cdn.kqed.org/wp-content/uploads/sites/10/2026/04/Daniel-Moreno-Gama-AP.jpg']Chan Loui worries about what it would mean if Judge Gonzalez Rogers effectively threw out that hard-won agreement between the attorneys general and OpenAI, essentially substituting a billionaire rival’s lawsuit for the state’s own regulatory process, whatever its deficiencies.\u003c/p>\n\u003cp>“You don’t want just anyone, any donor to complain,” Chan Loui said. 
“We have all this litigation against charities.” She said she sympathizes with those who want OpenAI to recommit as fully as possible to its original ethos, but she worries about what legal precedents this case could set for everybody else.\u003c/p>\n\u003cp>What’s not in dispute is that this trial will be a riveting spectacle for Silicon Valley, which will be watching this case with a mix of curiosity and fear. Judge Gonzalez Rogers has already proven \u003ca href=\"https://oag.ca.gov/news/press-releases/attorney-general-bonta-epic-v-apple-decision-win-california-law-protecting\">she will rule\u003c/a> against powerful tech companies when she determines the law demands it.\u003c/p>\n\u003cp>Also, the documents already unsealed suggest that what gets said in that Oakland courtroom may reveal a lot more about how Silicon Valley’s AI elite actually operates than anything previously said or posted in public.\u003c/p>\n\u003cp>“How much is OpenAI worth? Most of \u003ca href=\"https://www.reuters.com/business/openai-lays-groundwork-juggernaut-ipo-up-1-trillion-valuation-2025-10-29/\">$1 trillion\u003c/a>?” Bullock said. “There are ways that you could unscramble this omelet, but it would be extremely difficult, and it would be a massive headache for everyone involved.” He anticipates that whoever ends up on the losing end of this case will appeal.\u003c/p>\n\u003cp>[ad floatright]\u003c/p>\n",
"blocks": [],
"excerpt": "Two Silicon Valley titans, Elon Musk and Sam Altman, face off in court starting Monday in a case that claims Altman and others enriched themselves by allegedly betraying OpenAI’s founding mission.",
"status": "publish",
"parent": 0,
"modified": 1777313556,
"stats": {
"hasAudio": false,
"hasVideo": false,
"hasChartOrMap": false,
"iframeSrcs": [],
"hasGoogleForm": false,
"hasGallery": false,
"hasHearkenModule": false,
"hasPolis": false,
"paragraphCount": 35,
"wordCount": 1943
},
"headData": {
"title": "How to Unscramble an Omelet in Silicon Valley: The Musk v. Altman Trial That Will Try | KQED",
"description": "Two Silicon Valley titans, Elon Musk and Sam Altman, face off in court starting Monday in a case that claims Altman and others enriched themselves by allegedly betraying OpenAI’s founding mission.",
"ogTitle": "",
"ogDescription": "",
"ogImgId": "",
"twTitle": "",
"twDescription": "",
"twImgId": "",
"schema": {
"@context": "https://schema.org",
"@type": "NewsArticle",
"headline": "How to Unscramble an Omelet in Silicon Valley: The Musk v. Altman Trial That Will Try",
"datePublished": "2026-04-27T04:00:33-07:00",
"dateModified": "2026-04-27T11:12:36-07:00",
"image": "https://cdn.kqed.org/wp-content/uploads/2020/02/KQED-OG-Image@1x.png",
"isAccessibleForFree": "True",
"publisher": {
"@type": "NewsMediaOrganization",
"@id": "https://www.kqed.org/#organization",
"name": "KQED",
"logo": "https://cdn.kqed.org/wp-content/uploads/2020/02/KQED-OG-Image@1x.png",
"url": "https://www.kqed.org",
"sameAs": [
"https://www.facebook.com/KQED",
"https://twitter.com/KQED",
"https://www.instagram.com/kqed/",
"https://www.tiktok.com/@kqedofficial",
"https://www.linkedin.com/company/kqed",
"https://www.youtube.com/channel/UCeC0IOo7i1P_61zVUWbJ4nw"
]
}
}
},
"primaryCategory": {
"termId": 6188,
"slug": "law-and-justice",
"name": "Law and Justice"
},
"audioUrl": "https://traffic.omny.fm/d/clips/0af137ef-751e-4b19-a055-aaef00d2d578/ffca7e9f-6831-41c5-bcaf-aaef00f5a073/a372dc1c-fe90-423e-b5c6-b439011129f7/audio.mp3",
"sticky": false,
"nprStoryId": "kqed-12081290",
"templateType": "standard",
"featuredImageType": "standard",
"excludeFromSiteSearch": "Include",
"articleAge": "0",
"path": "/news/12081290/how-to-unscramble-an-omelet-in-silicon-valley-the-musk-v-altman-trial-that-will-try",
"audioTrackLength": null,
"parsedContent": [
{
"type": "contentString",
"content": "\u003cdiv class=\"post-body\">\u003cp>\u003cp>Starting Monday in Oakland, a federal judge will consider \u003ca href=\"https://www.kqed.org/forum/2010101912956/its-elon-musks-world-were-just-living-in-it\">Elon Musk\u003c/a>’s claim that Sam Altman and OpenAI abandoned their founding promise to develop AI for the \u003ca href=\"https://www.kqed.org/news/12034916/about-benefiting-humanity-calls-grow-for-openai-to-make-good-on-its-promises\">benefit of humanity\u003c/a>, rather than solely for profit. At stake is not just $134 billion in potential damages, but whether it matters, legally speaking, that one of the most powerful AI companies in the world was built on a lie.\u003c/p>\n\u003cp>Musk and Altman co-founded OpenAI in 2015 as a nonprofit research lab, along with Greg Brockman, an AI researcher and entrepreneur, and others prominent in the field, but Musk left the company after a bitter falling out in 2018.\u003c/p>\n\u003cp>The following year, OpenAI established its first for-profit subsidiary, with investor returns capped at 100 times their investment. This structure would eventually evolve into the nearly trillion-dollar public benefit corporation OpenAI became in 2025. A public benefit corporation is essentially a for-profit company with a mission statement it’s legally required to consider, but not necessarily to prioritize.\u003c/p>\n\u003cp>\u003c/p>\u003c/div>",
"attributes": {
"named": {},
"numeric": []
}
},
{
"type": "component",
"content": "",
"name": "ad",
"attributes": {
"named": {
"label": "fullwidth"
},
"numeric": [
"fullwidth"
]
}
},
{
"type": "contentString",
"content": "\u003cdiv class=\"post-body\">\u003cp>\u003c/p>\n\u003cp>This\u003ca href=\"https://www.courtlistener.com/docket/69013420/musk-v-altman/\"> lawsuit\u003c/a>, filed in 2024, originally alleged that Altman and Brockman ran a ‘long con,’ conspiring to enrich themselves at Musk’s expense.\u003c/p>\n\u003cp>On the eve of trial, in a move OpenAI called “evasive,” Musk’s lawyers voluntarily dismissed those personal fraud claims. What proceeds to trial today are two claims that go beyond Musk’s personal grievance: unjust enrichment and breach of charitable trust — essentially, the argument that OpenAI betrayed, not just Musk, but the public it promised to serve.\u003c/p>\n\u003cp>OpenAI argues Musk was fully aware the research lab needed to evolve beyond its nonprofit structure, because he participated in those early discussions, and even proposed folding OpenAI into Tesla. Now, OpenAI’s lawyers argue, Musk is disingenuously trying to use the courts to kneecap the most prominent rival to his own weaker and more controversial AI venture, xAI.\u003c/p>\n\u003cfigure id=\"attachment_12075430\" class=\"wp-caption aligncenter\" style=\"max-width: 2000px\">\u003cimg loading=\"lazy\" decoding=\"async\" class=\"size-full wp-image-12075430\" src=\"https://cdn.kqed.org/wp-content/uploads/sites/10/2026/03/260304-Elon-Musk-Trial-03-KQED.jpg\" alt=\"\" width=\"2000\" height=\"1333\" srcset=\"https://cdn.kqed.org/wp-content/uploads/sites/10/2026/03/260304-Elon-Musk-Trial-03-KQED.jpg 2000w, https://cdn.kqed.org/wp-content/uploads/sites/10/2026/03/260304-Elon-Musk-Trial-03-KQED-160x107.jpg 160w, https://cdn.kqed.org/wp-content/uploads/sites/10/2026/03/260304-Elon-Musk-Trial-03-KQED-1536x1024.jpg 1536w\" sizes=\"auto, (max-width: 2000px) 100vw, 2000px\">\u003cfigcaption class=\"wp-caption-text\">A courtroom sketch depicts Elon Musk on the stand on March 4, 2026. 
\u003ccite>(Vicki Behringer for KQED)\u003c/cite>\u003c/figcaption>\u003c/figure>\n\u003cp>“Motivated by jealousy, regret for walking away from OpenAI and a desire to derail a competing AI company, Elon has spent years harassing OpenAI through baseless lawsuits and public attacks,” the company\u003ca href=\"https://openai.com/index/openai-elon-musk/\"> posted\u003c/a> on its website, where it also offers a\u003ca href=\"https://openai.com/index/elon-musk-wanted-an-openai-for-profit/\"> timeline\u003c/a> that Musk v. Altman et al case watchers will find helpful as they follow what promises to be a barnburner of a trial.\u003c/p>\n\u003cp>\u003ca href=\"https://www.courtlistener.com/docket/69013420/musk-v-altman/?page=3\">Hundreds of court filings\u003c/a> provide a dishy treasure trove of private communications worthy of a telenovela, including some juicy excerpts from Brockman’s personal journal.\u003c/p>\n\u003cp>He writes about Musk, “it’d be wrong to steal the nonprofit from him. … that’d be pretty morally bankrupt. and he’s really not an idiot.”\u003c/p>\u003c/div>",
"attributes": {
"named": {},
"numeric": []
}
},
{
"type": "component",
"content": "",
"name": "aside",
"attributes": {
"named": {
"postid": "news_12072425",
"hero": "https://cdn.kqed.org/wp-content/uploads/sites/10/2024/05/AP24134775174210-1020x680.jpg",
"label": ""
},
"numeric": []
}
},
{
"type": "contentString",
"content": "\u003cdiv class=\"post-body\">\u003cp>Also, “Financially, what will take me to $1B?”\u003c/p>\n\u003cp>But without a doubt, it is the beef between Musk and Altman that will dominate this show. “They really do not like each other. That part is not fake,” said Charlie Bullock, a senior research fellow at the nonprofit Institute for Law and AI who advises state and federal policy makers on AI governance topics.\u003c/p>\n\u003cp>This trial promises to put on lurid public display a mini-universe of incestuous business relationships between men famous for rewriting rules rather than following them.\u003c/p>\n\u003cp>Personal spite between Musk and Altman aside, Bullock said, “We’re going to learn a lot over the course of this case and from the conclusion of this case about whether the legal system can meaningfully constrain frontier AI labs.”\u003c/p>\n\u003cp>This trial, Bullock told KQED, is “sort of the fallback option” in the absence of other checks on bad behavior in the AI space, such as federal regulation.\u003c/p>\n\u003cp>There is, for instance, a well-established law in California about nonprofits, for-profits, and how transitions between the two should be regulated. Whether and how it applies in this case is up to U.S. District Judge Yvonne Gonzalez Rogers in Oakland to determine over the next month.\u003c/p>\n\u003ch2>OpenAI is like nothing that’s come before\u003c/h2>\n\u003cp>Jill Horwitz, a law professor at Northwestern University and faculty director of the Lowell Milken Center for Philanthropy and Nonprofits at UCLA Law, likens OpenAI’s unique structure to “An enormous tail on a tiny dog.”\u003c/p>\n\u003cp>“The tail is the operating company, which is what everybody thinks of as being OpenAI, and the dog is the nonprofit, and it’s tiny. 
And it remains to be seen whether that board can be independent enough, because there’s such overlap between the nonprofit board and the for-profit board,” Horwitz said.\u003c/p>\n\u003cfigure id=\"attachment_12054564\" class=\"wp-caption aligncenter\" style=\"max-width: 2000px\">\u003cimg loading=\"lazy\" decoding=\"async\" class=\"wp-image-12054564 size-full\" src=\"https://cdn.kqed.org/wp-content/uploads/sites/10/2025/09/Sam-Altman_chatpgt.jpg\" alt=\"\" width=\"2000\" height=\"1333\" srcset=\"https://cdn.kqed.org/wp-content/uploads/sites/10/2025/09/Sam-Altman_chatpgt.jpg 2000w, https://cdn.kqed.org/wp-content/uploads/sites/10/2025/09/Sam-Altman_chatpgt-160x107.jpg 160w, https://cdn.kqed.org/wp-content/uploads/sites/10/2025/09/Sam-Altman_chatpgt-1536x1024.jpg 1536w\" sizes=\"auto, (max-width: 2000px) 100vw, 2000px\">\u003cfigcaption class=\"wp-caption-text\">Samuel Altman, CEO of OpenAI, testifies before the Senate Judiciary Subcommittee on Privacy, Technology and the Law on May 16, 2023, in Washington, D.C. \u003ccite>(Win McNamee/Getty Images)\u003c/cite>\u003c/figcaption>\u003c/figure>\n\u003cp>“It’s a weird structure. OpenAI isn’t one company. OpenAI is an interconnected group of companies. But it all is supposed to be advancing the nonprofit purpose,” Horwitz told KQED.\u003c/p>\n\u003cp>In 2018, even as OpenAI was privately contemplating the for-profit restructuring, it voluntarily adopted a new charter that restated and even strengthened its commitment to the public mission articulated at its founding.\u003c/p>\n\u003cp>In part, this had to do with the pressure Altman and OpenAI felt to attract top AI researchers, many of whom are concerned about the ethics of unleashing world-changing software on the rest of us. 
In 2024, 13 current and former OpenAI and Google DeepMind employees took the extraordinary step of publishing an \u003ca href=\"https://righttowarn.ai\">open letter\u003c/a> titled “Right to Warn,” calling out their own industry, and asking for protection if they warned the public.\u003c/p>\u003c/div>",
"attributes": {
"named": {},
"numeric": []
}
},
{
"type": "component",
"content": "",
"name": "aside",
"attributes": {
"named": {
"postid": "news_12079267",
"hero": "https://cdn.kqed.org/wp-content/uploads/sites/10/2026/04/Hegseth-Side-by-Side-c.jpg",
"label": ""
},
"numeric": []
}
},
{
"type": "contentString",
"content": "\u003cdiv class=\"post-body\">\u003cp>“We are hopeful that these risks can be adequately mitigated with sufficient guidance from the scientific community, policymakers, and the public. However, AI companies have strong financial incentives to avoid effective oversight, and we do not believe bespoke structures of corporate governance are sufficient to change this.”\u003c/p>\n\u003cp>To this day, it remains unclear whether Altman’s talk about benefiting humanity was anything more than a savvy sales pitch designed to attract top AI talent and allay the concerns of \u003ca href=\"https://www.kqed.org/news/11976097/california-lawmakers-take-on-ai-regulation-with-a-host-of-bills\">federal regulators\u003c/a>. This is one of the key questions trial watchers will be most keen to see answered.\u003c/p>\n\u003cp>“It’s quite typical for scientific research organizations to do all the hard work of the research before their IP is sold to a for-profit company for practical purposes,” said Rose Chan Loui, founding executive director of the Lowell Milken Center for Philanthropy and Nonprofits at UCLA Law.\u003c/p>\n\u003cp>What makes OpenAI unusual, Chan Loui said, is how explicitly and repeatedly the AI developer bound itself to promising its AI would be developed safely and for the benefit of all of humanity. “When they opened up to investment and formed the subsidiary, they recommitted to that purpose. They tied themselves even more tightly.”\u003c/p>\n\u003cp>Anthropic, founded by former OpenAI employees who left over concerns about the company’s direction, has cultivated a reputation as the more safety-conscious, ethically serious player in the AI race, the light gray hat to OpenAI’s dark gray one. Anthropic chose to incorporate as a public benefit corporation from the beginning, rather than a nonprofit, because a public benefit corporation has far more legal flexibility. 
“Anthropic may be behaving in a way that the public thinks is more charitable, but its legal duties to do so are a lot lower than OpenAI’s,” Horwitz said.\u003c/p>\n\u003ch2>But is Musk the right party to bring this suit?\u003c/h2>\n\u003cp>For legal eagles following this case, it’s curious that Musk is the plaintiff, rather than California’s attorney general, who is the primary legal guardian of charitable assets in the state, where most of OpenAI’s assets are located. But in 2025, Attorney General Rob Bonta negotiated a binding \u003ca href=\"https://oag.ca.gov/system/files/attachments/press-docs/Final%20Executed%20MOU%20Between%20OpenAI%20and%20California%20AG%20re%20Notice%20of%20Conditions%20of%20Non-Objection%20%2810.27.2025%29%20%28Signed%20by%20OpenAI%29%20%28Signed%20by%20CA%20DOJ%29.pdf\">memorandum of understanding\u003c/a> with OpenAI. The AG in Delaware, where OpenAI is incorporated, issued a parallel statement of non-objection.\u003c/p>\n\u003cp>A coalition of more than 30 California foundations and nonprofit organizations, including the San Francisco Foundation and TechEquity, \u003ca href=\"https://www.sff.org/Offsite-Media/Charitable-coalition-letter-on-OpenAI-conversion-1-29-25.pdf\">urged Bonta\u003c/a> to take immediate legal action to protect OpenAI’s charitable assets, arguing his office had both the authority and the responsibility to do so.\u003c/p>\n\u003cfigure id=\"attachment_12063671\" class=\"wp-caption aligncenter\" style=\"max-width: 2000px\">\u003cimg loading=\"lazy\" decoding=\"async\" class=\"size-full wp-image-12063671\" src=\"https://cdn.kqed.org/wp-content/uploads/sites/10/2025/11/RobBontaAP.jpg\" alt=\"\" width=\"2000\" height=\"1333\" srcset=\"https://cdn.kqed.org/wp-content/uploads/sites/10/2025/11/RobBontaAP.jpg 2000w, https://cdn.kqed.org/wp-content/uploads/sites/10/2025/11/RobBontaAP-160x107.jpg 160w, https://cdn.kqed.org/wp-content/uploads/sites/10/2025/11/RobBontaAP-1536x1024.jpg 1536w\" sizes=\"auto, (max-width: 2000px) 
100vw, 2000px\">\u003cfigcaption class=\"wp-caption-text\">California Attorney General Rob Bonta speaks to reporters as Arizona Attorney General Kris Mayes, left, and Oregon Attorney General Dan Rayfield, right, listen outside the Supreme Court on Wednesday, Nov. 5, 2025, in Washington, D.C. \u003ccite>(Mark Schiefelbein/AP Photo)\u003c/cite>\u003c/figcaption>\u003c/figure>\n\u003cp>\u003ca href=\"https://www.kqed.org/news/12034916/about-benefiting-humanity-calls-grow-for-openai-to-make-good-on-its-promises\">More than 50 organizations\u003c/a> also petitioned Bonta to halt OpenAI’s for-profit conversion until he calculated the full market value of OpenAI’s nonprofit assets, estimated at the time at up to $300 billion, and directed OpenAI to transfer that value to independent nonprofit entities.\u003c/p>\n\u003cp>“It’s not too late for the Attorney General to revisit his agreement with OpenAI,” wrote Catherine Bracy, founder and CEO of TechEquity, an Oakland-based tech accountability organization. “The evidence this trial unearths, especially how OpenAI violated its original charitable mission in pursuit of profit, will likely leave him no choice.”\u003c/p>\n\u003cp>Chan Loui is among those scratching her head over a basic question: why does Musk get to bring this case at all? “He’s a competitor,” she said.\u003c/p>\n\u003cp>A personal fraud claim, that Altman lied to him to get his money, might have given Musk the clearest standing as an injured party. But Musk voluntarily dismissed those claims late last week. What remains rests almost entirely on a public interest argument, one that California’s attorney general, not a billionaire with a rival AI company of his own, would typically make. \u003c/p>\u003c/div>",
"attributes": {
"named": {},
"numeric": []
}
},
{
"type": "component",
"content": "",
"name": "aside",
"attributes": {
"named": {
"postid": "news_12079896",
"hero": "https://cdn.kqed.org/wp-content/uploads/sites/10/2026/04/Daniel-Moreno-Gama-AP.jpg",
"label": ""
},
"numeric": []
}
},
{
"type": "contentString",
"content": "\u003cdiv class=\"post-body\">\u003cp>Chan Loui worries about what it would mean if Judge Gonzalez Rogers effectively threw out that hard-won agreement between the attorneys general and OpenAI, essentially substituting a billionaire rival’s lawsuit for the state’s own regulatory process, whatever its deficiencies.\u003c/p>\n\u003cp>“You don’t want just anyone, any donor to complain,” Chan Loui said. “We have all this litigation against charities.” She said she sympathizes with those who want OpenAI to recommit as fully as possible to its original ethos, but she worries about what legal precedents this case could set for everybody else.\u003c/p>\n\u003cp>What’s not in dispute is that this trial will be a riveting spectacle for Silicon Valley, which will be watching this case with a mix of curiosity and fear. Judge Gonzalez Rogers has already proven \u003ca href=\"https://oag.ca.gov/news/press-releases/attorney-general-bonta-epic-v-apple-decision-win-california-law-protecting\">she will rule\u003c/a> against powerful tech companies when she determines the law demands it.\u003c/p>\n\u003cp>Also, the documents already unsealed suggest that what gets said in that Oakland courtroom may reveal a lot more about how Silicon Valley’s AI elite actually operates than anything previously said or posted in public.\u003c/p>\n\u003cp>“How much is OpenAI worth? Most of \u003ca href=\"https://www.reuters.com/business/openai-lays-groundwork-juggernaut-ipo-up-1-trillion-valuation-2025-10-29/\">$1 trillion\u003c/a>?” Bullock said. “There are ways that you could unscramble this omelet, but it would be extremely difficult, and it would be a massive headache for everyone involved.” He anticipates that whoever ends up on the losing end of this case will appeal.\u003c/p>\n\u003cp>\u003c/p>\u003c/div>",
"attributes": {
"named": {},
"numeric": []
}
},
{
"type": "component",
"content": "",
"name": "ad",
"attributes": {
"named": {
"label": "floatright"
},
"numeric": [
"floatright"
]
}
},
{
"type": "contentString",
"content": "\u003cdiv class=\"post-body\">\u003cp>\u003c/p>\n\u003c/div>\u003c/p>",
"attributes": {
"named": {},
"numeric": []
}
}
],
"link": "/news/12081290/how-to-unscramble-an-omelet-in-silicon-valley-the-musk-v-altman-trial-that-will-try",
"authors": [
"251"
],
"categories": [
"news_6188",
"news_8",
"news_248"
],
"tags": [
"news_34755",
"news_1386",
"news_18538",
"news_3897",
"news_27626",
"news_23052",
"news_19954",
"news_34054",
"news_33542",
"news_33543",
"news_38",
"news_34586",
"news_1631"
],
"featImg": "news_12080929",
"label": "news"
},
"news_12081336": {
"type": "posts",
"id": "news_12081336",
"meta": {
"index": "posts_1716263798",
"site": "news",
"id": "12081336",
"score": null,
"sort": [
1777129237000
]
},
"guestAuthors": [],
"slug": "these-uc-berkeley-students-are-leading-the-fight-against-phones",
"title": "These UC Berkeley Students Are Leading the Fight Against Phones",
"publishDate": 1777129237,
"format": "standard",
"headTitle": "These UC Berkeley Students Are Leading the Fight Against Phones | KQED",
"labelTerm": {
"site": "news"
},
"content": "\u003cp>On a sunny Friday afternoon at Memorial Glade, the center of \u003ca href=\"https://www.kqed.org/news/tag/uc-berkeley\">UC Berkeley’s campus\u003c/a>, students set up volleyball nets, cornhole, picnic blankets and a makeshift plywood stage for live music.\u003c/p>\n\u003cp>Their goal? To throw a phone-free party.\u003c/p>\n\u003cp>Music blasted from a speaker near a snack table. Colorful, handwritten signs read messages like “Favorite app? Delete it,” and “Take back your mind.”\u003c/p>\n\u003cp>[ad fullwidth]\u003c/p>\n\u003cp>At a check-in table, students had the option to seal their cell phones in a plastic bag. Nearby, students propped up gravestones cut out of posterboard, each bearing the logo of a different social media app.\u003c/p>\n\u003cp>The event was hosted by \u003ca href=\"https://www.projectreboot.school/\">Project Reboot\u003c/a>, an organization born on Berkeley’s campus, with the mission of helping young people “reset their tech habits, reclaim their time and regain their focus.”\u003c/p>\n\u003cp>The project began in the form of a semester-long class that helped students reduce their screen time.\u003c/p>\n\u003cfigure id=\"attachment_12081401\" class=\"wp-caption aligncenter\" style=\"max-width: 2000px\">\u003cimg loading=\"lazy\" decoding=\"async\" class=\"wp-image-12081401 size-full\" src=\"https://cdn.kqed.org/wp-content/uploads/sites/10/2026/04/UCBNoPhones1.jpg\" alt=\"\" width=\"2000\" height=\"1500\" srcset=\"https://cdn.kqed.org/wp-content/uploads/sites/10/2026/04/UCBNoPhones1.jpg 2000w, https://cdn.kqed.org/wp-content/uploads/sites/10/2026/04/UCBNoPhones1-160x120.jpg 160w, https://cdn.kqed.org/wp-content/uploads/sites/10/2026/04/UCBNoPhones1-1536x1152.jpg 1536w\" sizes=\"auto, (max-width: 2000px) 100vw, 2000px\">\u003cfigcaption class=\"wp-caption-text\">A handmade sign reads “Live With Intention” at a phone-free event at UC Berkeley on Friday, April 24, 2026. 
\u003ccite>(Eliza Peppel/KQED)\u003c/cite>\u003c/figcaption>\u003c/figure>\n\u003cp>“I feel like [screen] addiction has kind of been our birthright,” said Dawson Kelly, a third-year student and one of the event’s hosts. Kelly said he’s working on a thesis about digital dependence. “We need more infrastructure for our generation to take back our time, take back our agency, and look at all the things that have been stolen from us, and not let this be the anxious generation that we’ve been made out to be.”\u003c/p>\n\u003cp>Sahar Yousef, a Berkeley neuroscientist and lecturer who serves on Project Reboot’s research advisory board, said her students are increasingly pushing back “against the default of being on their phones, constantly scrolling.\u003c/p>\n\u003cp>“This is truly a demonstration that they’ve wanted to put together,” Yousef said, “to demonstrate what has really been taken from them.”[aside postID=news_12078253 hero='https://cdn.kqed.org/wp-content/uploads/sites/10/2024/01/240104-PEOPLES-PARK-MD-05-1020x680.jpg']According to a survey of UC Berkeley undergraduates, 78% of students reported that they believe their phone use “prevents them from thinking deeply, being creative, or engaging fully with ideas.”\u003c/p>\n\u003cp>Third-year students Ashlyn Torres and Izzy Newman said they found out about Friday’s event from a flier, instead of through the usual social media channels. Torres said she left her phone at home before joining.\u003c/p>\n\u003cp>“It was different this morning because I was able to recognize there is life around me,” she said. “And we probably should talk to each other more and just listen to what the world has to offer rather than just what our phones have to offer.”\u003c/p>\n\u003cp>Jonny Vasquez is a third-year student and advocate for reduced screen time on campus. 
To reach other students, he said, he started standing in a busy area of campus holding a sign that read, “Lowest screentime contest.”\u003c/p>\n\u003cp>“People would either completely ignore the sign,” Vasquez said, “or they would come up and say, ‘Oh my goodness, I’ve been waiting for someone to help us with this.’”\u003c/p>\n\u003cp>Vasquez said that since he deleted his social media accounts, he’s stopped comparing himself to others and experiences greater overall satisfaction with his life. He said he hopes to continue to share that with others.\u003c/p>\n\u003cfigure id=\"attachment_12081399\" class=\"wp-caption aligncenter\" style=\"max-width: 2000px\">\u003cimg loading=\"lazy\" decoding=\"async\" class=\"size-full wp-image-12081399\" src=\"https://cdn.kqed.org/wp-content/uploads/sites/10/2026/04/UCBNoPhones3.jpg\" alt=\"\" width=\"2000\" height=\"1500\" srcset=\"https://cdn.kqed.org/wp-content/uploads/sites/10/2026/04/UCBNoPhones3.jpg 2000w, https://cdn.kqed.org/wp-content/uploads/sites/10/2026/04/UCBNoPhones3-160x120.jpg 160w, https://cdn.kqed.org/wp-content/uploads/sites/10/2026/04/UCBNoPhones3-1536x1152.jpg 1536w\" sizes=\"auto, (max-width: 2000px) 100vw, 2000px\">\u003cfigcaption class=\"wp-caption-text\">Students rally during a game of volleyball on the grass at a phone-free gathering at UC Berkeley on Friday, April 24, 2026. 
\u003ccite>(Eliza Peppel/KQED)\u003c/cite>\u003c/figcaption>\u003c/figure>\n\u003cp>Several students offered each other tips about creating some distance with their phones, including plugging it in out of reach overnight, turning it completely off while socializing, and leaning on a community with like-minded goals to hold each other accountable.\u003c/p>\n\u003cp>Kelly said that the movement the students hope to create is about their personal agency.\u003c/p>\n\u003cp>“These are the peak years of our lives, and they’ve been stolen from us by companies that are making billions and billions of dollars every single year to take as much of our time as possible. We have to fight back, and we fight back by connecting and engaging in a life that we should have been living from the beginning.”\u003c/p>\n\u003cp>\u003c/p>\n",
"blocks": [],
"excerpt": "They’re deleting their social media accounts and reducing their screen time. And they want you to join them.",
"status": "publish",
"parent": 0,
"modified": 1777309134,
"stats": {
"hasAudio": false,
"hasVideo": false,
"hasChartOrMap": false,
"iframeSrcs": [],
"hasGoogleForm": false,
"hasGallery": false,
"hasHearkenModule": false,
"hasPolis": false,
"paragraphCount": 19,
"wordCount": 738
},
"headData": {
"title": "These UC Berkeley Students Are Leading the Fight Against Phones | KQED",
"description": "They’re deleting their social media accounts and reducing their screen time. And they want you to join them.",
"ogTitle": "",
"ogDescription": "",
"ogImgId": "",
"twTitle": "",
"twDescription": "",
"twImgId": "",
"schema": {
"@context": "https://schema.org",
"@type": "NewsArticle",
"headline": "These UC Berkeley Students Are Leading the Fight Against Phones",
"datePublished": "2026-04-25T08:00:37-07:00",
"dateModified": "2026-04-27T09:58:54-07:00",
"image": "https://cdn.kqed.org/wp-content/uploads/2020/02/KQED-OG-Image@1x.png",
"isAccessibleForFree": "True",
"publisher": {
"@type": "NewsMediaOrganization",
"@id": "https://www.kqed.org/#organization",
"name": "KQED",
"logo": "https://cdn.kqed.org/wp-content/uploads/2020/02/KQED-OG-Image@1x.png",
"url": "https://www.kqed.org",
"sameAs": [
"https://www.facebook.com/KQED",
"https://twitter.com/KQED",
"https://www.instagram.com/kqed/",
"https://www.tiktok.com/@kqedofficial",
"https://www.linkedin.com/company/kqed",
"https://www.youtube.com/channel/UCeC0IOo7i1P_61zVUWbJ4nw"
]
}
}
},
"primaryCategory": {
"termId": 18540,
"slug": "education",
"name": "Education"
},
"audioUrl": "https://traffic.omny.fm/d/clips/0af137ef-751e-4b19-a055-aaef00d2d578/ffca7e9f-6831-41c5-bcaf-aaef00f5a073/20814760-e003-42de-97f3-b43901162922/audio.mp3",
"sticky": false,
"nprStoryId": "kqed-12081336",
"templateType": "standard",
"featuredImageType": "standard",
"excludeFromSiteSearch": "Include",
"articleAge": "0",
"path": "/news/12081336/these-uc-berkeley-students-are-leading-the-fight-against-phones",
"audioTrackLength": null,
"parsedContent": [
{
"type": "contentString",
"content": "\u003cdiv class=\"post-body\">\u003cp>\u003cp>On a sunny Friday afternoon at Memorial Glade, the center of \u003ca href=\"https://www.kqed.org/news/tag/uc-berkeley\">UC Berkeley’s campus\u003c/a>, students set up volleyball nets, cornhole, picnic blankets and a makeshift plywood stage for live music.\u003c/p>\n\u003cp>Their goal? To throw a phone-free party.\u003c/p>\n\u003cp>Music blasted from a speaker near a snack table. Colorful, handwritten signs read messages like “Favorite app? Delete it,” and “Take back your mind.”\u003c/p>\n\u003cp>\u003c/p>\u003c/div>",
"attributes": {
"named": {},
"numeric": []
}
},
{
"type": "component",
"content": "",
"name": "ad",
"attributes": {
"named": {
"label": "fullwidth"
},
"numeric": [
"fullwidth"
]
}
},
{
"type": "contentString",
"content": "\u003cdiv class=\"post-body\">\u003cp>\u003c/p>\n\u003cp>At a check-in table, students had the option to seal their cell phones in a plastic bag. Nearby, students propped up gravestones cut out of posterboard, each bearing the logo of a different social media app.\u003c/p>\n\u003cp>The event was hosted by \u003ca href=\"https://www.projectreboot.school/\">Project Reboot\u003c/a>, an organization born on Berkeley’s campus, with the mission of helping young people “reset their tech habits, reclaim their time and regain their focus.”\u003c/p>\n\u003cp>The project began in the form of a semester-long class that helped students reduce their screen time.\u003c/p>\n\u003cfigure id=\"attachment_12081401\" class=\"wp-caption aligncenter\" style=\"max-width: 2000px\">\u003cimg loading=\"lazy\" decoding=\"async\" class=\"wp-image-12081401 size-full\" src=\"https://cdn.kqed.org/wp-content/uploads/sites/10/2026/04/UCBNoPhones1.jpg\" alt=\"\" width=\"2000\" height=\"1500\" srcset=\"https://cdn.kqed.org/wp-content/uploads/sites/10/2026/04/UCBNoPhones1.jpg 2000w, https://cdn.kqed.org/wp-content/uploads/sites/10/2026/04/UCBNoPhones1-160x120.jpg 160w, https://cdn.kqed.org/wp-content/uploads/sites/10/2026/04/UCBNoPhones1-1536x1152.jpg 1536w\" sizes=\"auto, (max-width: 2000px) 100vw, 2000px\">\u003cfigcaption class=\"wp-caption-text\">A handmade sign reads “Live With Intention” at a phone-free event at UC Berkeley on Friday, April 24, 2026. \u003ccite>(Eliza Peppel/KQED)\u003c/cite>\u003c/figcaption>\u003c/figure>\n\u003cp>“I feel like [screen] addiction has kind of been our birthright,” said Dawson Kelly, a third-year student and one of the event’s hosts. Kelly said he’s working on a thesis about digital dependence. 
“We need more infrastructure for our generation to take back our time, take back our agency, and look at all the things that have been stolen from us, and not let this be the anxious generation that we’ve been made out to be.”\u003c/p>\n\u003cp>Sahar Yousef, a Berkeley neuroscientist and lecturer who serves on Project Reboot’s research advisory board, said her students are increasingly pushing back “against the default of being on their phones, constantly scrolling.\u003c/p>\n\u003cp>“This is truly a demonstration that they’ve wanted to put together,” Yousef said, “to demonstrate what has really been taken from them.”\u003c/p>\u003c/div>",
"attributes": {
"named": {},
"numeric": []
}
},
{
"type": "component",
"content": "",
"name": "aside",
"attributes": {
"named": {
"postid": "news_12078253",
"hero": "https://cdn.kqed.org/wp-content/uploads/sites/10/2024/01/240104-PEOPLES-PARK-MD-05-1020x680.jpg",
"label": ""
},
"numeric": []
}
},
{
"type": "contentString",
"content": "\u003cdiv class=\"post-body\">\u003cp>According to a survey of UC Berkeley undergraduates, 78% of students reported that they believe their phone use “prevents them from thinking deeply, being creative, or engaging fully with ideas.”\u003c/p>\n\u003cp>Third-year students Ashlyn Torres and Izzy Newman said they found out about Friday’s event from a flier, instead of through the usual social media channels. Torres said she left her phone at home before joining.\u003c/p>\n\u003cp>“It was different this morning because I was able to recognize there is life around me,” she said. “And we probably should talk to each other more and just listen to what the world has to offer rather than just what our phones have to offer.”\u003c/p>\n\u003cp>Jonny Vasquez is a third-year student and advocate for reduced screen time on campus. To reach other students, he said, he started standing in a busy area of campus holding a sign that read, “Lowest screentime contest.”\u003c/p>\n\u003cp>“People would either completely ignore the sign,” Vasquez said, “or they would come up and say, ‘Oh my goodness, I’ve been waiting for someone to help us with this.’”\u003c/p>\n\u003cp>Vasquez said that since he deleted his social media accounts, he’s stopped comparing himself to others and experiences greater overall satisfaction with his life. 
He said he hopes to continue to share that with others.\u003c/p>\n\u003cfigure id=\"attachment_12081399\" class=\"wp-caption aligncenter\" style=\"max-width: 2000px\">\u003cimg loading=\"lazy\" decoding=\"async\" class=\"size-full wp-image-12081399\" src=\"https://cdn.kqed.org/wp-content/uploads/sites/10/2026/04/UCBNoPhones3.jpg\" alt=\"\" width=\"2000\" height=\"1500\" srcset=\"https://cdn.kqed.org/wp-content/uploads/sites/10/2026/04/UCBNoPhones3.jpg 2000w, https://cdn.kqed.org/wp-content/uploads/sites/10/2026/04/UCBNoPhones3-160x120.jpg 160w, https://cdn.kqed.org/wp-content/uploads/sites/10/2026/04/UCBNoPhones3-1536x1152.jpg 1536w\" sizes=\"auto, (max-width: 2000px) 100vw, 2000px\">\u003cfigcaption class=\"wp-caption-text\">Students rally during a game of volleyball on the grass at a phone-free gathering at UC Berkeley on Friday, April 24, 2026. \u003ccite>(Eliza Peppel/KQED)\u003c/cite>\u003c/figcaption>\u003c/figure>\n\u003cp>Several students offered each other tips about creating some distance with their phones, including plugging it in out of reach overnight, turning it completely off while socializing, and leaning on a community with like-minded goals to hold each other accountable.\u003c/p>\n\u003cp>Kelly said that the movement the students hope to create is about their personal agency.\u003c/p>\n\u003cp>“These are the peak years of our lives, and they’ve been stolen from us by companies that are making billions and billions of dollars every single year to take as much of our time as possible. We have to fight back, and we fight back by connecting and engaging in a life that we should have been living from the beginning.”\u003c/p>\n\u003cp>\u003c/p>\n\u003c/div>",
"attributes": {
"named": {},
"numeric": []
}
}
],
"link": "/news/12081336/these-uc-berkeley-students-are-leading-the-fight-against-phones",
"authors": [
"11989"
],
"categories": [
"news_18540",
"news_457",
"news_28250",
"news_8"
],
"tags": [
"news_129",
"news_35288",
"news_36084",
"news_20013",
"news_27626",
"news_18543",
"news_2109",
"news_4950",
"news_1089",
"news_1631",
"news_17597"
],
"featImg": "news_12081402",
"label": "news"
},
"news_12081279": {
"type": "posts",
"id": "news_12081279",
"meta": {
"index": "posts_1716263798",
"site": "news",
"id": "12081279",
"score": null,
"sort": [
1777055475000
]
},
"guestAuthors": [],
"slug": "anthropic-mythos-claude-unauthorized-breach-investigation-cybersecurity",
"title": "After a Potential Mythos Breach, Why Do Developers Use Such Powerful AI Models?",
"publishDate": 1777055475,
"format": "standard",
"headTitle": "After a Potential Mythos Breach, Why Do Developers Use Such Powerful AI Models? | KQED",
"labelTerm": {
"site": "news"
},
"content": "\u003cp>\u003ca href=\"https://www.kqed.org/news/tag/ai\">Artificial intelligence\u003c/a> is making life easier for some — and a lot harder for others. San Francisco-based AI firm Anthropic — which also developed the chatbot Claude — earlier this month released Mythos, a powerful model \u003ca href=\"https://red.anthropic.com/2026/mythos-preview/\">developers claim\u003c/a> can identify and exploit “vulnerabilities in every major operating system and every major web browser when directed by a user to do so.”\u003c/p>\n\u003cp>Anthropic has only given a few companies — among them JPMorgan Chase, cybersecurity giant CrowdStrike and fellow AI developers Google and Amazon — access to Mythos as part of what it’s calling “Project Glasswing.” The goal of this partnership, Anthropic \u003ca href=\"https://www.anthropic.com/glasswing\">said\u003c/a>, is to use Mythos to prevent hackers (who \u003ca href=\"https://www.axios.com/2025/11/13/anthropic-china-claude-code-cyberattack\">are using\u003c/a> their own powerful AI models) from targeting the weak spots in the software that helps these massive corporations run.\u003c/p>\n\u003cp>But despite the high level of secrecy surrounding its model, Anthropic confirmed to KQED on Thursday that it is currently investigating a report of “unauthorized access” to Mythos through one of the third-party vendors helping develop the software. 
The company has not found any evidence yet that Anthropic systems have been affected or that the reported activity extends beyond the third-party vendor environment.\u003c/p>\n\u003cp>[ad fullwidth]\u003c/p>\n\u003cp>Even before this latest incident, \u003ca href=\"https://www.cbc.ca/news/business/mythos-anthropic-ai-explainer-9.7171597\">multiple cybersecurity experts\u003c/a> and \u003ca href=\"https://www.reuters.com/business/finance/bessent-powell-warn-bank-ceos-about-anthropic-model-risks-bloomberg-news-reports-2026-04-10/\">global leaders\u003c/a> raised concerns about the power of Mythos and the potential consequences if this software fell into the wrong hands.\u003c/p>\n\u003cp>Earlier this week, KQED’s Forum \u003ca href=\"https://www.kqed.org/forum/2010101913607/anthropics-new-ai-mythos-is-a-cybersecurity-game-changer\">spoke with\u003c/a> Alex Stamos, computer science lecturer at Stanford University and chief product officer for San Francisco-based AI firm Corridor, to understand why developers still move forward with creating such powerful technology despite the potential risks.\u003c/p>\n\u003cp>Keep reading for the takeaways from his conversation with KQED’s Mina Kim, including insights on how folks who are not software engineers can sift through all the buzz surrounding this quickly evolving technology.\u003c/p>\n\u003cp>\u003cstrong>Skip ahead to:\u003c/strong>\u003c/p>\n\u003cul>\n\u003cli>\u003ca href=\"#WhyarecybersecurityexpertssoworriedaboutMythos\">Why are cybersecurity experts so worried about Mythos?\u003c/a>\u003c/li>\n\u003cli>\u003ca href=\"#WhywouldAnthropiclimitwhocanusethistechnology\">Why would Anthropic limit who can use this technology?\u003c/a>\u003c/li>\n\u003cli>\u003ca href=\"#IsthefederalgovernmentalsousingMythos\">Is the federal government also using Mythos?\u003c/a>\u003c/li>\n\u003cli>\u003ca href=\"#Whyusesuchapowerfulbutunpredictabletechnologyatall\">Why use such a powerful — but unpredictable — technology at 
all?\u003c/a>\u003c/li>\n\u003c/ul>\n\u003cp>\u003cem>This conversation has been edited for length and clarity.\u003c/em>\u003c/p>\n\u003cp>\u003cstrong>Mina Kim: What is Mythos capable of?\u003c/strong>\u003c/p>\n\u003cp>\u003cstrong>Alex Stamos:\u003c/strong> Mythos is a model that Anthropic has not released publicly. They’ve provided it to a very small number of large companies to use privately, as well as to some very important open-source projects to use.\u003c/p>\n\u003cp>Anthropic believes Mythos marks a large-scale change from the AI capabilities that have existed in the past. They’ve now been able to find thousands of vulnerabilities instead of just dozens or hundreds.\u003c/p>\n\u003cp>What we’ve seen in the past is that these things are really good at finding bugs, and they’re much faster than humans. But now Mythos is even better than the best human security consultants and security engineers.\u003c/p>\n\u003cfigure id=\"attachment_12081283\" class=\"wp-caption aligncenter\" style=\"max-width: 2000px\">\u003cimg loading=\"lazy\" decoding=\"async\" class=\"size-full wp-image-12081283\" src=\"https://cdn.kqed.org/wp-content/uploads/sites/10/2026/04/AnthropicAP.jpg\" alt=\"\" width=\"2000\" height=\"1333\" srcset=\"https://cdn.kqed.org/wp-content/uploads/sites/10/2026/04/AnthropicAP.jpg 2000w, https://cdn.kqed.org/wp-content/uploads/sites/10/2026/04/AnthropicAP-160x107.jpg 160w, https://cdn.kqed.org/wp-content/uploads/sites/10/2026/04/AnthropicAP-1536x1024.jpg 1536w\" sizes=\"auto, (max-width: 2000px) 100vw, 2000px\">\u003cfigcaption class=\"wp-caption-text\">The Anthropic website and the company’s logo are displayed on a computer screen in New York on Feb. 26, 2026. 
\u003ccite>(Patrick Sison/AP Photo)\u003c/cite>\u003c/figcaption>\u003c/figure>\n\u003cp>\u003cstrong>\u003ca id=\"WhyarecybersecurityexpertssoworriedaboutMythos\">\u003c/a>You’re describing an incredible tool to find bugs, holes and issues that we have not seen before so that we can defend against them. So why is it scaring people so much?\u003c/strong>\u003c/p>\n\u003cp>It’s scaring people because the first step in attacking a system is finding flaws in that system. In the cybersecurity world, we use a term called the kill chain. This is a term we borrowed from the military.\u003c/p>\n\u003cp>When the military uses it, it refers to discovering an asset, doing reconnaissance, and figuring out how to deliver a weapon on a target.\u003c/p>\n\u003cp>In the cyber world, the kill chain involves reconnaissance, finding a flaw in a system used by a target, weaponizing that flaw, delivering the exploit, establishing command and control of the system, exploring the network, moving through it, and then doing whatever you want — whether that’s stealing data, shutting down a system, or encrypting it for ransom.\u003c/p>\n\u003cp>Major AI companies, like Anthropic and OpenAI, have released threat reports — building on earlier efforts from companies like Facebook and Google— that show how people use these platforms for malicious activity.\u003c/p>\n\u003cp>Those reports show that advanced threat actors are using AI to automate other parts of the attack process, like exploring networks, breaking in and establishing control channels.[aside postID=news_12076608 hero='https://cdn.kqed.org/wp-content/uploads/sites/10/2026/03/Billboard-AI-Illustration_6.jpg']What we’re seeing is attackers taking tasks that used to require human effort — and therefore had limits — and using AI to make them faster and cheaper.\u003c/p>\n\u003cp>\u003cstrong>And I imagine that our ability to patch or defend against these activities pales in comparison, or am I wrong? 
Do the patches exist, and are they easy to implement?\u003c/strong>\u003c/p>\n\u003cp>This is where AI can help. AI can find flaws, and it can also write patches. That’s the good news. That’s why Anthropic is providing Mythos to companies and open-source maintainers — not just to find bugs, but to fix them.\u003c/p>\n\u003cp>What we’re trying to do as an industry right now is fix vulnerabilities before adversaries can exploit them. There’s a race underway. The most advanced models — what we call foundation models, like those from Anthropic, OpenAI and Google — are currently ahead of open-weight models, many of which are developed by Chinese companies.\u003c/p>\n\u003cp>\u003cstrong>A listener writes: ‘Anthropic is releasing their models as a warning, but there’s no federal or state guidelines on this. Are we close to government regulatory action at all?’\u003c/strong>\u003c/p>\n\u003cp>The current administration \u003ca href=\"https://www.politico.com/news/2026/04/23/trump-picked-a-fight-with-anthropic-now-the-administration-is-backing-off-00889241\">came down on Anthropic\u003c/a> because they thought they were too ethical … Of the major AI labs, I think Anthropic is the one with the most deep-seated ethical frameworks. I think we’re fortunate that they have the models that are the best at bug-finding, and they’re setting a good standard here.\u003c/p>\n\u003cp>\u003cstrong>\u003ca id=\"IsthefederalgovernmentalsousingMythos\">\u003c/a>Do you know the extent to which the federal government is also using Mythos to search for and patch its own security vulnerabilities?\u003c/strong>\u003c/p>\n\u003cp>My understanding is that U.S. Cyber Command has been testing Mythos. Now the fascinating question is: How is the U.S. 
government going to use it?\u003c/p>\n\u003cp>In the National Security Agency, after the \u003ca href=\"https://www.theguardian.com/world/interactive/2013/nov/01/snowden-nsa-files-surveillance-revelations-decoded#section/1\">Snowden disclosures\u003c/a>, there is the creation of this thing called the \u003ca href=\"https://trumpwhitehouse.archives.gov/sites/whitehouse.gov/files/images/External%20-%20Unclassified%20VEP%20Charter%20FINAL.PDF\">Vulnerabilities Equities Process\u003c/a>, which is the process by which NSA and U.S.\u003c/p>\n\u003cfigure id=\"attachment_12079281\" class=\"wp-caption aligncenter\" style=\"max-width: 1980px\">\u003cimg loading=\"lazy\" decoding=\"async\" class=\"size-full wp-image-12079281\" src=\"https://cdn.kqed.org/wp-content/uploads/sites/10/2026/04/Hegseth-Side-by-Side-c.jpg\" alt=\"\" width=\"1980\" height=\"1460\" srcset=\"https://cdn.kqed.org/wp-content/uploads/sites/10/2026/04/Hegseth-Side-by-Side-c.jpg 1980w, https://cdn.kqed.org/wp-content/uploads/sites/10/2026/04/Hegseth-Side-by-Side-c-160x118.jpg 160w, https://cdn.kqed.org/wp-content/uploads/sites/10/2026/04/Hegseth-Side-by-Side-c-1536x1133.jpg 1536w\" sizes=\"auto, (max-width: 1980px) 100vw, 1980px\">\u003cfigcaption class=\"wp-caption-text\">Left: Anthropic co-founder and CEO Dario Amodei speaks at INBOUND 2025 on Sept. 4, 2025, in San Francisco, California. Right: Defense Secretary Pete Hegseth listens during a Pentagon briefing on April 8, 2026, in Arlington, Virginia. \u003ccite>(Chance Yeh/Getty Images for HubSpot; Andrew Harnik/Getty Images)\u003c/cite>\u003c/figcaption>\u003c/figure>\n\u003cp>Cyber Command — which have both a defensive responsibility and an offensive responsibility — are supposed to think about if we know of a bug, do we use it against America’s enemies, or do we get it fixed to defend America?\u003c/p>\n\u003cp>Are they only gonna use Mythos to find bugs to be used against America’s enemies, or are they going to use it for defensive purposes? 
And what is Anthropic’s response going to be?\u003c/p>\n\u003cp>Will Anthropic put restrictions so you can only use Mythos for defensive purposes —or will they allow Mythos to be used for offensive purposes?\u003c/p>\n\u003cp>\u003cstrong>Can they even control that once they let them have access to it?\u003c/strong>\u003c/p>\n\u003cp>I don’t know. I don’t think so. For the most part, my understanding is Anthropic’s models that the NSA is using and Cyber Command are probably running in \u003ca href=\"https://aws.amazon.com/bedrock/\">Amazon Bedrock\u003c/a> … what’s called Amazon’s top secret cloud, which means that Anthropic’s employees — at least those without top secret clearance — will not have access to any of the logs there.\u003c/p>\n\u003cp>\u003cstrong>\u003ca id=\"WhywouldAnthropiclimitwhocanusethistechnology\">\u003c/a>A listener writes: ‘If Anthropic lacks capacity to handle Mythos right now, why release it at all? If they want big companies to evaluate it, why publicize it? Seems fishy.’\u003c/strong>\u003c/p>\n\u003cp>I don’t think it’s fishy. This is a normal part of any release process is that you have a small set of testers. They’re also improving it by doing this. Anthropic gets feedback on this.\u003c/p>\n\u003cp>These people find bugs. They also find false positives. If Mythos finds a bug and JPMorgan Chase says, ‘This isn’t a real bug,’ then that goes back into the training set for the next build of Mythos. Anthropic, I think, truly believes they’re doing the right thing here by getting these bugs fixed.\u003c/p>\n\u003cp>\u003cstrong>\u003ca id=\"Whyusesuchapowerfulbutunpredictabletechnologyatall\">\u003c/a>There’s really no going back once this tool is out there, right? But I can hear people asking, why even build these tools in the first place? Why are they even free to do this in the first place if they’re so dangerous and can create such havoc? Is it just inevitable?\u003c/strong>\u003c/p>\n\u003cp>We’re getting philosophical. 
This is the core conflict at the heart of Anthropic, but also other AI companies’ reason for existence … Part of the argument here is it’s just math. Once these ideas were released, it was inevitable people would have this progress.[aside postID=forum_2010101913607 hero='https://cdn.kqed.org/wp-content/uploads/sites/43/2026/04/GettyImages-2269887514-2000x1331.jpg']It’s not like the atomic bomb, where you have to have uranium and a huge industrial base. This just requires laptops and graphics cards. Other countries, other people, other companies will be doing it.\u003c/p>\n\u003cp>If you believe that you can build an ethical framework to do it well, then you believe that you should do it first and do it correctly. In this case, you could try to mitigate the harm by finding all these bugs and getting them fixed or fixing the software first before other people do it and actually do it harmfully.\u003c/p>\n\u003cp>\u003cstrong>A listener writes: ‘You’re talking about cyberattacks on a large scale with large companies or countries. But what about me? Should I be worried about people hacking into my personal computer or phone or something?’ What can we do?\u003c/strong>\u003c/p>\n\u003cp>About Mythos, nothing. That’s not something that individual people should be dealing with. The way normal people are hacked in 2026 is the same way normal people were hacked in 2016, 2006 and maybe even 1996. The number one way normal people are hacked is they use the same password in every single website all day.\u003c/p>\n\u003cp>Get a password manager and put all your passwords in there. Have it generate random passwords and then have one really good password, and then you can write it down. I know people say don’t write down passwords, but that’s really stupid because nobody can steal the password in your pocket from Russia. If it’s in your wallet or your purse, they can’t reach from five thousand miles away and take it out of your wallet or purse. 
Nobody mugs you for your password.\u003c/p>\n\u003cp>\u003cstrong>What are we likely to see in the next couple of years with these models rolling out? What should we be prepared for in this sort of initial period?\u003c/strong>\u003c/p>\n\u003cp>Our product road map at Corridor is three months long right now. Because if you plan beyond three months, everything has changed in our industry. For the first time ever, technology is building technology. From a security perspective, a lot depends on which of two futures we’re living in.\u003c/p>\n\u003cp>In the optimistic future, the bug curve flattens out. The superhuman capabilities end up not inventing entirely new classes of vulnerabilities. At least the types of bugs are the kinds we’ve seen before. There’s a finite number of them, and we’re just draining the swamp.\u003c/p>\n\u003cp>The pessimistic future is that these new things invent things that I don’t know exist. The hard part is, I can’t really guess because I am predicting superhuman capabilities here. For superhuman models that are gonna be invented by the models that exist right now. In the pessimistic view, we are going to have to work with AI to rebuild the systems that our lives rely upon, using memory-safe and type-safe languages, using formal models.\u003c/p>\n\u003cp>[ad floatright]\u003c/p>\n",
"blocks": [],
"excerpt": "AI firm Anthropic is investigating a potential breach of its new model, Mythos. But developers say that developing such powerful AI technology is necessary to prevent future — and potentially more dangerous — cyberattacks.",
"status": "publish",
"parent": 0,
"modified": 1777059958,
"stats": {
"hasAudio": false,
"hasVideo": false,
"hasChartOrMap": false,
"iframeSrcs": [],
"hasGoogleForm": false,
"hasGallery": false,
"hasHearkenModule": false,
"hasPolis": false,
"paragraphCount": 46,
"wordCount": 2087
},
"headData": {
"title": "After a Potential Mythos Breach, Why Do Developers Use Such Powerful AI Models? | KQED",
"description": "AI firm Anthropic is investigating a potential breach of its new model, Mythos. But developers say that developing such powerful AI technology is necessary to prevent future — and potentially more dangerous — cyberattacks.",
"ogTitle": "",
"ogDescription": "",
"ogImgId": "",
"twTitle": "",
"twDescription": "",
"twImgId": "",
"schema": {
"@context": "https://schema.org",
"@type": "NewsArticle",
"headline": "After a Potential Mythos Breach, Why Do Developers Use Such Powerful AI Models?",
"datePublished": "2026-04-24T11:31:15-07:00",
"dateModified": "2026-04-24T12:45:58-07:00",
"image": "https://cdn.kqed.org/wp-content/uploads/2020/02/KQED-OG-Image@1x.png",
"isAccessibleForFree": "True",
"publisher": {
"@type": "NewsMediaOrganization",
"@id": "https://www.kqed.org/#organization",
"name": "KQED",
"logo": "https://cdn.kqed.org/wp-content/uploads/2020/02/KQED-OG-Image@1x.png",
"url": "https://www.kqed.org",
"sameAs": [
"https://www.facebook.com/KQED",
"https://twitter.com/KQED",
"https://www.instagram.com/kqed/",
"https://www.tiktok.com/@kqedofficial",
"https://www.linkedin.com/company/kqed",
"https://www.youtube.com/channel/UCeC0IOo7i1P_61zVUWbJ4nw"
]
}
}
},
"primaryCategory": {
"termId": 248,
"slug": "technology",
"name": "Technology"
},
"sticky": false,
"nprStoryId": "kqed-12081279",
"templateType": "standard",
"featuredImageType": "standard",
"excludeFromSiteSearch": "Include",
"articleAge": "0",
"path": "/news/12081279/anthropic-mythos-claude-unauthorized-breach-investigation-cybersecurity",
"audioTrackLength": null,
"parsedContent": [
{
"type": "contentString",
"content": "\u003cdiv class=\"post-body\">\u003cp>\u003cp>\u003ca href=\"https://www.kqed.org/news/tag/ai\">Artificial intelligence\u003c/a> is making life easier for some — and a lot harder for others. San Francisco-based AI firm Anthropic — which also developed the chatbot Claude — earlier this month released Mythos, a powerful model \u003ca href=\"https://red.anthropic.com/2026/mythos-preview/\">developers claim\u003c/a> can identify and exploit “vulnerabilities in every major operating system and every major web browser when directed by a user to do so.”\u003c/p>\n\u003cp>Anthropic has only given a few companies — among them JPMorgan Chase, cybersecurity giant CrowdStrike and fellow AI developers Google and Amazon — access to Mythos as part of what it’s calling “Project Glasswing.” The goal of this partnership, Anthropic \u003ca href=\"https://www.anthropic.com/glasswing\">said\u003c/a>, is to use Mythos to prevent hackers (who \u003ca href=\"https://www.axios.com/2025/11/13/anthropic-china-claude-code-cyberattack\">are using\u003c/a> their own powerful AI models) from targeting the weak spots in the software that helps these massive corporations run.\u003c/p>\n\u003cp>But despite the high level of secrecy surrounding its model, Anthropic confirmed to KQED on Thursday that it is currently investigating a report of “unauthorized access” to Mythos through one of the third-party vendors helping develop the software. The company has not found any evidence yet that Anthropic systems have been affected or that the reported activity extends beyond the third-party vendor environment.\u003c/p>\n\u003cp>\u003c/p>\u003c/div>",
"attributes": {
"named": {},
"numeric": []
}
},
{
"type": "component",
"content": "",
"name": "ad",
"attributes": {
"named": {
"label": "fullwidth"
},
"numeric": [
"fullwidth"
]
}
},
{
"type": "contentString",
"content": "\u003cdiv class=\"post-body\">\u003cp>\u003c/p>\n\u003cp>Even before this latest incident, \u003ca href=\"https://www.cbc.ca/news/business/mythos-anthropic-ai-explainer-9.7171597\">multiple cybersecurity experts\u003c/a> and \u003ca href=\"https://www.reuters.com/business/finance/bessent-powell-warn-bank-ceos-about-anthropic-model-risks-bloomberg-news-reports-2026-04-10/\">global leaders\u003c/a> raised concerns about the power of Mythos and the potential consequences if this software fell into the wrong hands.\u003c/p>\n\u003cp>Earlier this week, KQED’s Forum \u003ca href=\"https://www.kqed.org/forum/2010101913607/anthropics-new-ai-mythos-is-a-cybersecurity-game-changer\">spoke with\u003c/a> Alex Stamos, computer science lecturer at Stanford University and chief product officer for San Francisco-based AI firm Corridor, to understand why developers still move forward with creating such powerful technology despite the potential risks.\u003c/p>\n\u003cp>Keep reading for the takeaways from his conversation with KQED’s Mina Kim, including insights on how folks who are not software engineers can sift through all the buzz surrounding this quickly evolving technology.\u003c/p>\n\u003cp>\u003cstrong>Skip ahead to:\u003c/strong>\u003c/p>\n\u003cul>\n\u003cli>\u003ca href=\"#WhyarecybersecurityexpertssoworriedaboutMythos\">Why are cybersecurity experts so worried about Mythos?\u003c/a>\u003c/li>\n\u003cli>\u003ca href=\"#WhywouldAnthropiclimitwhocanusethistechnology\">Why would Anthropic limit who can use this technology?\u003c/a>\u003c/li>\n\u003cli>\u003ca href=\"#IsthefederalgovernmentalsousingMythos\">Is the federal government also using Mythos?\u003c/a>\u003c/li>\n\u003cli>\u003ca href=\"#Whyusesuchapowerfulbutunpredictabletechnologyatall\">Why use such a powerful — but unpredictable — technology at all?\u003c/a>\u003c/li>\n\u003c/ul>\n\u003cp>\u003cem>This conversation has been edited for length and clarity.\u003c/em>\u003c/p>\n\u003cp>\u003cstrong>Mina 
Kim: What is Mythos capable of?\u003c/strong>\u003c/p>\n\u003cp>\u003cstrong>Alex Stamos:\u003c/strong> Mythos is a model that Anthropic has not released publicly. They’ve provided it to a very small number of large companies to use privately, as well as to some very important open-source projects to use.\u003c/p>\n\u003cp>Anthropic believes Mythos marks a large-scale change from the AI capabilities that have existed in the past. They’ve now been able to find thousands of vulnerabilities instead of just dozens or hundreds.\u003c/p>\n\u003cp>What we’ve seen in the past is that these things are really good at finding bugs, and they’re much faster than humans. But now Mythos is even better than the best human security consultants and security engineers.\u003c/p>\n\u003cfigure id=\"attachment_12081283\" class=\"wp-caption aligncenter\" style=\"max-width: 2000px\">\u003cimg loading=\"lazy\" decoding=\"async\" class=\"size-full wp-image-12081283\" src=\"https://cdn.kqed.org/wp-content/uploads/sites/10/2026/04/AnthropicAP.jpg\" alt=\"\" width=\"2000\" height=\"1333\" srcset=\"https://cdn.kqed.org/wp-content/uploads/sites/10/2026/04/AnthropicAP.jpg 2000w, https://cdn.kqed.org/wp-content/uploads/sites/10/2026/04/AnthropicAP-160x107.jpg 160w, https://cdn.kqed.org/wp-content/uploads/sites/10/2026/04/AnthropicAP-1536x1024.jpg 1536w\" sizes=\"auto, (max-width: 2000px) 100vw, 2000px\">\u003cfigcaption class=\"wp-caption-text\">The Anthropic website and the company’s logo are displayed on a computer screen in New York on Feb. 26, 2026. \u003ccite>(Patrick Sison/AP Photo)\u003c/cite>\u003c/figcaption>\u003c/figure>\n\u003cp>\u003cstrong>\u003ca id=\"WhyarecybersecurityexpertssoworriedaboutMythos\">\u003c/a>You’re describing an incredible tool to find bugs, holes and issues that we have not seen before so that we can defend against them. 
So why is it scaring people so much?\u003c/strong>\u003c/p>\n\u003cp>It’s scaring people because the first step in attacking a system is finding flaws in that system. In the cybersecurity world, we use a term called the kill chain. This is a term we borrowed from the military.\u003c/p>\n\u003cp>When the military uses it, it refers to discovering an asset, doing reconnaissance, and figuring out how to deliver a weapon on a target.\u003c/p>\n\u003cp>In the cyber world, the kill chain involves reconnaissance, finding a flaw in a system used by a target, weaponizing that flaw, delivering the exploit, establishing command and control of the system, exploring the network, moving through it, and then doing whatever you want — whether that’s stealing data, shutting down a system, or encrypting it for ransom.\u003c/p>\n\u003cp>Major AI companies, like Anthropic and OpenAI, have released threat reports — building on earlier efforts from companies like Facebook and Google— that show how people use these platforms for malicious activity.\u003c/p>\n\u003cp>Those reports show that advanced threat actors are using AI to automate other parts of the attack process, like exploring networks, breaking in and establishing control channels.\u003c/p>\u003c/div>",
"attributes": {
"named": {},
"numeric": []
}
},
{
"type": "component",
"content": "",
"name": "aside",
"attributes": {
"named": {
"postid": "news_12076608",
"hero": "https://cdn.kqed.org/wp-content/uploads/sites/10/2026/03/Billboard-AI-Illustration_6.jpg",
"label": ""
},
"numeric": []
}
},
{
"type": "contentString",
"content": "\u003cdiv class=\"post-body\">\u003cp>What we’re seeing is attackers taking tasks that used to require human effort — and therefore had limits — and using AI to make them faster and cheaper.\u003c/p>\n\u003cp>\u003cstrong>And I imagine that our ability to patch or defend against these activities pales in comparison, or am I wrong? Do the patches exist, and are they easy to implement?\u003c/strong>\u003c/p>\n\u003cp>This is where AI can help. AI can find flaws, and it can also write patches. That’s the good news. That’s why Anthropic is providing Mythos to companies and open-source maintainers — not just to find bugs, but to fix them.\u003c/p>\n\u003cp>What we’re trying to do as an industry right now is fix vulnerabilities before adversaries can exploit them. There’s a race underway. The most advanced models — what we call foundation models, like those from Anthropic, OpenAI and Google — are currently ahead of open-weight models, many of which are developed by Chinese companies.\u003c/p>\n\u003cp>\u003cstrong>A listener writes: ‘Anthropic is releasing their models as a warning, but there’s no federal or state guidelines on this. Are we close to government regulatory action at all?’\u003c/strong>\u003c/p>\n\u003cp>The current administration \u003ca href=\"https://www.politico.com/news/2026/04/23/trump-picked-a-fight-with-anthropic-now-the-administration-is-backing-off-00889241\">came down on Anthropic\u003c/a> because they thought they were too ethical … Of the major AI labs, I think Anthropic is the one with the most deep-seated ethical frameworks. 
I think we’re fortunate that they have the models that are the best at bug-finding, and they’re setting a good standard here.\u003c/p>\n\u003cp>\u003cstrong>\u003ca id=\"IsthefederalgovernmentalsousingMythos\">\u003c/a>Do you know the extent to which the federal government is also using Mythos to search for and patch its own security vulnerabilities?\u003c/strong>\u003c/p>\n\u003cp>My understanding is that U.S. Cyber Command has been testing Mythos. Now the fascinating question is: How is the U.S. government going to use it?\u003c/p>\n\u003cp>In the National Security Agency, after the \u003ca href=\"https://www.theguardian.com/world/interactive/2013/nov/01/snowden-nsa-files-surveillance-revelations-decoded#section/1\">Snowden disclosures\u003c/a>, there is the creation of this thing called the \u003ca href=\"https://trumpwhitehouse.archives.gov/sites/whitehouse.gov/files/images/External%20-%20Unclassified%20VEP%20Charter%20FINAL.PDF\">Vulnerabilities Equities Process\u003c/a>, which is the process by which NSA and U.S.\u003c/p>\n\u003cfigure id=\"attachment_12079281\" class=\"wp-caption aligncenter\" style=\"max-width: 1980px\">\u003cimg loading=\"lazy\" decoding=\"async\" class=\"size-full wp-image-12079281\" src=\"https://cdn.kqed.org/wp-content/uploads/sites/10/2026/04/Hegseth-Side-by-Side-c.jpg\" alt=\"\" width=\"1980\" height=\"1460\" srcset=\"https://cdn.kqed.org/wp-content/uploads/sites/10/2026/04/Hegseth-Side-by-Side-c.jpg 1980w, https://cdn.kqed.org/wp-content/uploads/sites/10/2026/04/Hegseth-Side-by-Side-c-160x118.jpg 160w, https://cdn.kqed.org/wp-content/uploads/sites/10/2026/04/Hegseth-Side-by-Side-c-1536x1133.jpg 1536w\" sizes=\"auto, (max-width: 1980px) 100vw, 1980px\">\u003cfigcaption class=\"wp-caption-text\">Left: Anthropic co-founder and CEO Dario Amodei speaks at INBOUND 2025 on Sept. 4, 2025, in San Francisco, California. Right: Defense Secretary Pete Hegseth listens during a Pentagon briefing on April 8, 2026, in Arlington, Virginia. 
\u003ccite>(Chance Yeh/Getty Images for HubSpot; Andrew Harnik/Getty Images)\u003c/cite>\u003c/figcaption>\u003c/figure>\n\u003cp>Cyber Command — which have both a defensive responsibility and an offensive responsibility — are supposed to think about if we know of a bug, do we use it against America’s enemies, or do we get it fixed to defend America?\u003c/p>\n\u003cp>Are they only gonna use Mythos to find bugs to be used against America’s enemies, or are they going to use it for defensive purposes? And what is Anthropic’s response going to be?\u003c/p>\n\u003cp>Will Anthropic put restrictions so you can only use Mythos for defensive purposes —or will they allow Mythos to be used for offensive purposes?\u003c/p>\n\u003cp>\u003cstrong>Can they even control that once they let them have access to it?\u003c/strong>\u003c/p>\n\u003cp>I don’t know. I don’t think so. For the most part, my understanding is Anthropic’s models that the NSA is using and Cyber Command are probably running in \u003ca href=\"https://aws.amazon.com/bedrock/\">Amazon Bedrock\u003c/a> … what’s called Amazon’s top secret cloud, which means that Anthropic’s employees — at least those without top secret clearance — will not have access to any of the logs there.\u003c/p>\n\u003cp>\u003cstrong>\u003ca id=\"WhywouldAnthropiclimitwhocanusethistechnology\">\u003c/a>A listener writes: ‘If Anthropic lacks capacity to handle Mythos right now, why release it at all? If they want big companies to evaluate it, why publicize it? Seems fishy.’\u003c/strong>\u003c/p>\n\u003cp>I don’t think it’s fishy. This is a normal part of any release process is that you have a small set of testers. They’re also improving it by doing this. Anthropic gets feedback on this.\u003c/p>\n\u003cp>These people find bugs. They also find false positives. If Mythos finds a bug and JPMorgan Chase says, ‘This isn’t a real bug,’ then that goes back into the training set for the next build of Mythos. 
Anthropic, I think, truly believes they’re doing the right thing here by getting these bugs fixed.\u003c/p>\n\u003cp>\u003cstrong>\u003ca id=\"Whyusesuchapowerfulbutunpredictabletechnologyatall\">\u003c/a>There’s really no going back once this tool is out there, right? But I can hear people asking, why even build these tools in the first place? Why are they even free to do this in the first place if they’re so dangerous and can create such havoc? Is it just inevitable?\u003c/strong>\u003c/p>\n\u003cp>We’re getting philosophical. This is the core conflict at the heart of Anthropic, but also other AI companies’ reason for existence … Part of the argument here is it’s just math. Once these ideas were released, it was inevitable people would have this progress.\u003c/p>\u003c/div>",
"attributes": {
"named": {},
"numeric": []
}
},
{
"type": "component",
"content": "",
"name": "aside",
"attributes": {
"named": {
"postid": "forum_2010101913607",
"hero": "https://cdn.kqed.org/wp-content/uploads/sites/43/2026/04/GettyImages-2269887514-2000x1331.jpg",
"label": ""
},
"numeric": []
}
},
{
"type": "contentString",
"content": "\u003cdiv class=\"post-body\">\u003cp>It’s not like the atomic bomb, where you have to have uranium and a huge industrial base. This just requires laptops and graphics cards. Other countries, other people, other companies will be doing it.\u003c/p>\n\u003cp>If you believe that you can build an ethical framework to do it well, then you believe that you should do it first and do it correctly. In this case, you could try to mitigate the harm by finding all these bugs and getting them fixed or fixing the software first before other people do it and actually do it harmfully.\u003c/p>\n\u003cp>\u003cstrong>A listener writes: ‘You’re talking about cyberattacks on a large scale with large companies or countries. But what about me? Should I be worried about people hacking into my personal computer or phone or something?’ What can we do?\u003c/strong>\u003c/p>\n\u003cp>About Mythos, nothing. That’s not something that individual people should be dealing with. The way normal people are hacked in 2026 is the same way normal people were hacked in 2016, 2006 and maybe even 1996. The number one way normal people are hacked is they use the same password in every single website all day.\u003c/p>\n\u003cp>Get a password manager and put all your passwords in there. Have it generate random passwords and then have one really good password, and then you can write it down. I know people say don’t write down passwords, but that’s really stupid because nobody can steal the password in your pocket from Russia. If it’s in your wallet or your purse, they can’t reach from five thousand miles away and take it out of your wallet or purse. Nobody mugs you for your password.\u003c/p>\n\u003cp>\u003cstrong>What are we likely to see in the next couple of years with these models rolling out? What should we be prepared for in this sort of initial period?\u003c/strong>\u003c/p>\n\u003cp>Our product road map at Corridor is three months long right now. 
Because if you plan beyond three months, everything has changed in our industry. For the first time ever, technology is building technology. From a security perspective, a lot depends on which of two futures we’re living in.\u003c/p>\n\u003cp>In the optimistic future, the bug curve flattens out. The superhuman capabilities end up not inventing entirely new classes of vulnerabilities. At least the types of bugs are the kinds we’ve seen before. There’s a finite number of them, and we’re just draining the swamp.\u003c/p>\n\u003cp>The pessimistic future is that these new things invent things that I don’t know exist. The hard part is, I can’t really guess because I am predicting superhuman capabilities here. For superhuman models that are gonna be invented by the models that exist right now. In the pessimistic view, we are going to have to work with AI to rebuild the systems that our lives rely upon, using memory-safe and type-safe languages, using formal models.\u003c/p>\n\u003cp>\u003c/p>\u003c/div>",
"attributes": {
"named": {},
"numeric": []
}
},
{
"type": "component",
"content": "",
"name": "ad",
"attributes": {
"named": {
"label": "floatright"
},
"numeric": [
"floatright"
]
}
},
{
"type": "contentString",
"content": "\u003cdiv class=\"post-body\">\u003cp>\u003c/p>\n\u003c/div>\u003c/p>",
"attributes": {
"named": {},
"numeric": []
}
}
],
"link": "/news/12081279/anthropic-mythos-claude-unauthorized-breach-investigation-cybersecurity",
"authors": [
"11708",
"243"
],
"categories": [
"news_28250",
"news_8",
"news_248"
],
"tags": [
"news_25184",
"news_32664",
"news_34755",
"news_1386",
"news_17619",
"news_1323",
"news_15",
"news_34586",
"news_1631",
"news_20058",
"news_21417"
],
"featImg": "news_12081306",
"label": "news"
},
"news_12080824": {
"type": "posts",
"id": "news_12080824",
"meta": {
"index": "posts_1716263798",
"site": "news",
"id": "12080824",
"score": null,
"sort": [
1776852022000
]
},
"guestAuthors": [],
"slug": "the-h-1b-visa-process-but-make-it-a-video-game",
"title": "The H-1B Visa Process But Make It a Video Game",
"publishDate": 1776852022,
"format": "audio",
"headTitle": "The H-1B Visa Process But Make It a Video Game | KQED",
"labelTerm": {},
"content": "\u003cp>\u003ca href=\"#episode-transcript\">\u003ci>View the full episode transcript.\u003c/i>\u003c/a>\u003c/p>\n\u003cp>\u003cspan style=\"font-weight: 400\">Life on an H-1B visa — a visa that lets U.S. companies hire foreign-born workers for specialized jobs — is difficult, unpredictable, and has gotten even harder under the Trump administration. A new gaming studio, Reality Reload, is trying to capture that experience in a mobile game. It’s called H1B.Life, and it simulates the difficult choices, competing priorities, and personal sacrifices visa holders face — complete with chaotic design elements, like all-powerful “gods” who control your fate.\u003c/span>\u003c/p>\n\u003cp>\u003cspan style=\"font-weight: 400\">KQED reporter Azul Dahlstrom-Eckman joins Morgan to break down the game’s surprising design choices, the mission behind it, and the stories he heard from people navigating the H1-B process. \u003c/span>\u003c/p>\n\u003cp>\u003c!-- iframe plugin v.4.3 wordpress.org/plugins/iframe/ -->\u003cbr>\n\u003ciframe loading=\"lazy\" frameborder=\"0\" height=\"200\" scrolling=\"no\" src=\"https://playlist.megaphone.fm?e=KQINC2401184331\" width=\"100%\" class=\"iframe-class\">\u003c/iframe>\u003c/p>\n\u003cp>\u003cstrong>Guest:\u003c/strong>\u003c/p>\n\u003cul>\n\u003cli>\u003ca href=\"https://www.kqed.org/author/adahlstromeckman\">\u003cspan style=\"font-weight: 400\">Azul Dahlstrom-Eckman\u003c/span>\u003c/a>\u003cspan style=\"font-weight: 400\">, reporter at \u003c/span>\u003ci>\u003cspan style=\"font-weight: 400\">KQED\u003c/span>\u003c/i>\u003c/li>\n\u003c/ul>\n\u003cp>\u003cb>Further Reading/Listening:\u003c/b>\u003c/p>\n\u003cul>\n\u003cli>\u003ca href=\"https://www.kqed.org/news/12076756/what-does-it-take-to-get-a-h-1b-visa-this-video-game-shows-just-how-complicated-it-is\">\u003cspan style=\"font-weight: 400\">What Does It Take to Get a H-1B Visa? 
This Video Game Shows Just How Complicated It Is \u003c/span>\u003c/a>\u003cspan style=\"font-weight: 400\">— Azul Dahlstrom-Eckman, \u003c/span>\u003ci>\u003cspan style=\"font-weight: 400\">KQED\u003c/span>\u003c/i>\u003c/li>\n\u003cli>\u003ci>\u003c/i>\u003ca href=\"https://www.businessinsider.com/meta-google-amazon-microsoft-h-1b-visa-applications-decline-2026-4\">Meta, Google, and Amazon slash H-1B petitions after Trump’s visa crackdown\u003c/a> — Geoff Weiss, Melia Russell, Andy Kiersz, and Alex Nicoll, \u003ci>Business Insider \u003c/i>\u003c/li>\n\u003cli>\u003ca href=\"https://www.insidehighered.com/news/government/state-policy/2026/01/29/faculty-warn-against-state-bans-h-1b-visas\">Faculty Warn Against State Bans on H-1B Visas\u003c/a> — Jessica Blake, \u003ci>\u003ci>Inside Higher Ed \u003c/i>\u003c/i>\u003c/li>\n\u003cli>\u003ca href=\"https://www.the-scientist.com/h-1b-visa-restrictions-will-hurt-america-s-research-potential-experts-say-74267\">H-1B Visa Restrictions Will Hurt America’s Research Potential, Experts Say\u003c/a> — Shelby Bradford, PhD, \u003ci>\u003ci>\u003ci>The Scientist \u003c/i>\u003c/i>\u003c/i>\u003c/li>\n\u003cli>\u003ca href=\"https://www.wired.com/story/trump-immigration-visa-secrutiny-tech/\">US Tech Visa Applications Are Being Put Through the Wringer \u003c/a>— Lauren Goode, \u003ci>\u003ci>\u003ci>\u003ci>Wired \u003c/i>\u003c/i>\u003c/i>\u003c/i>\u003c/li>\n\u003cli>\u003ca href=\"https://www.wired.com/story/made-in-china-a-new-game-turns-the-h-1b-visa-system-into-a-surreal-simulation/\">\u003cspan style=\"font-weight: 400\">A New Game Turns the H-1B Visa System Into a Surreal Simulation \u003c/span>\u003c/a>\u003cspan style=\"font-weight: 400\">— Zeyi Yang, \u003c/span>\u003ci>\u003cspan style=\"font-weight: 400\">Wired \u003c/span>\u003c/i>\u003c/li>\n\u003c/ul>\n\u003cp>Want to give us feedback on the show? 
Shoot us an email at \u003ca href=\"mailto:CloseAllTabs@KQED.org\">CloseAllTabs@KQED.org\u003c/a>\u003c/p>\n\u003cp>\u003cspan style=\"font-weight: 400\">Follow us on\u003c/span>\u003ca href=\"https://www.instagram.com/closealltabspod/\"> \u003cspan style=\"font-weight: 400\">Instagram\u003c/span>\u003c/a>\u003cspan style=\"font-weight: 400\"> and\u003c/span>\u003ca href=\"https://www.tiktok.com/@closealltabs\"> \u003cspan style=\"font-weight: 400\">TikTok\u003c/span>\u003c/a>\u003c/p>\n\u003cp>[ad fullwidth]\u003c/p>\n\u003ch2 id=\"episode-transcript\">Episode Transcript\u003c/h2>\n\u003cp>\u003ci>\u003cspan style=\"font-weight: 400\">This is a computer-generated transcript. While our team has reviewed it, there may be errors.\u003c/span>\u003c/i>\u003c/p>\n\u003cp>\u003cb>Morgan Sung: \u003c/b>\u003cspan style=\"font-weight: 400\">Hi, it’s Morgan. Be honest with me, how many tabs do you have open? Feeling a little overwhelmed by closing them? Well, we have an episode for you. If you like our deep dives and wanna hear more, please rate and review us on Spotify, Apple Podcasts, or wherever you listen to the show and tell your friends about us too. Okay, let’s get to the episode. \u003c/span>\u003c/p>\n\u003cp>\u003cb>Morgan Sung: \u003c/b>\u003cspan style=\"font-weight: 400\">Quick note: in this episode we use the term “immigrant” in a broad sense to refer to people living and working in the U.S. on H-1B visas. In legal terms the H1-B is a nonimmigrant temporary visa, though many visa holders hope to stay in the U.S. long term. \u003c/span>\u003cspan style=\"font-weight: 400\">Every March, a corner of the Chinese social media app Red Note gets flooded with posts about Chick-fil-A. The users go all out, buying Chick-fil-A keychains, changing their profile pictures to the red and white chicken logo, and of course, treating themselves to a hearty meal of a chicken sandwich and waffle fries. 
You’ll often see the same emojis in each post: prayer hands, a chicken, and an American flag. \u003c/span>\u003cspan style=\"font-weight: 400\">All of these users are applying for the H-1B visa, a visa for highly skilled immigrants sponsored by an employer. Many come to Silicon Valley to work in tech. Hundreds of thousands of hopefuls apply every year, but only 85,000 applicants are selected. So, what does this have to do with fried chicken? \u003c/span>\u003c/p>\n\u003cp>\u003cb>Azul Dahlstrom-Eckman: \u003c/b>\u003cspan style=\"font-weight: 400\">It turns out that there’s a tradition amongst Chinese H-1B applicants here in the United States that they believe that eating a lot of Chick-fil-A and just generally associating with Chick-fil-A brings you luck and will increase your chances of getting selected in the H-1B lottery. \u003c/span>\u003c/p>\n\u003cp>\u003cb>Morgan Sung: \u003c/b>\u003cspan style=\"font-weight: 400\">Azul Dahlstrom-Eckman is a reporter at KQED. \u003c/span>\u003c/p>\n\u003cp>\u003cb>Azul Dahlstrom-Eckman: \u003c/b>\u003cspan style=\"font-weight: 400\">And apparently, if you go to a Chick-fil-A here in the Bay Area around March, you’re likely to see a lot of Chinese immigrants who potentially could be H-1B applicants eating there. And it’s not really clear where this started, but it’s definitely a thing. \u003c/span>\u003c/p>\n\u003cp>\u003cb>Morgan Sung: \u003c/b>\u003cspan style=\"font-weight: 400\">Azul said that this trend, the annual Chick-fil-A frenzy on social media, is part of a much bigger story. To even apply for an H-1B visa, you need an employer to sponsor you, which means that you need to have a job offer. It doesn’t guarantee a visa, just that you can enter the lottery. The process for getting an H-1B Visa has been changing, and a system that was already difficult has become even harder for applicants. 
\u003c/span>\u003c/p>\n\u003cp>\u003cb>Azul Dahlstrom-Eckman: \u003c/b>\u003cspan style=\"font-weight: 400\">It is true that with the Trump administration, there has been a lot of changes specifically to this year’s H-1B visa process. The first is that there’s now a $100,000 fee if a company wants to sponsor somebody who isn’t currently living in the country. \u003c/span>\u003c/p>\n\u003cp>\u003cb>Morgan Sung: \u003c/b>\u003cspan style=\"font-weight: 400\">In wake of the changes and very steep application fee, some universities and companies implemented a hiring freeze for H-1B applicants. And the ones that are still hiring are sponsoring far fewer visas than in previous cycles. The updated application system isn’t totally random anymore. Higher paid applicants have a better chance of being picked now. But for the most part, the application process feels like a game of luck. At the end of the day, it’s still a lottery. Which is why good luck traditions, like getting Chick-fil-A during the registration window, have become baked into the modern mythology of the American immigrant experience. \u003c/span>\u003cspan style=\"font-weight: 400\">It can all feel like a game, one in which the rules seem arbitrary and unpredictable. So when Azul heard about a studio turning that experience into a playable app, it made perfect sense. \u003c/span>\u003c/p>\n\u003cp>\u003cb>Azul Dahlstrom-Eckman: \u003c/b>\u003cspan style=\"font-weight: 400\">H1B.Life is a game that tries to simulate the experience of an immigrant who’s trying to get H1-B visa status. And it’s a pretty early prototype now. Basically, it’s sort of like a text-based decision tree on your smartphone. And I played a demo of it, and it was actually kind of interesting. \u003c/span>\u003cspan style=\"font-weight: 400\">So one of the opening scenes of the game says, during high school, you spent hours and hours on your laptop binging Gilmore Girls on shady, unauthorized streaming websites. 
Everything in your drowsy new town reminds you of the show. If it wasn’t for Lorelai and Rory, you might have never decided to… and then there’s like two decisions, and one is study journalism or come to New England. And I was like, wow, that’s really oddly specific. And it turns out that H1B.Life is based on real-life interviews from H1-B applicants, specifically Chinese immigrants living in Silicon Valley. \u003c/span>\u003c/p>\n\u003cp>\u003cb>Morgan Sung: \u003c/b>\u003cspan style=\"font-weight: 400\">We’re diving into H1B.Life today, the arduous application process, how capricious policy changes impact the trajectory of an immigrant’s entire life, and the cost of chasing a dream, all wrapped up in a mobile game. \u003c/span>\u003c/p>\n\u003cp>\u003cspan style=\"font-weight: 400\">\u003cb>Morgan Sung: \u003c/b>This is Close All Tabs. I’m Morgan Sung, tech journalist and your chronically online friend, here to open as many browser tabs as it takes to help you understand how the digital world affects our real lives. Let’s get into it. \u003c/span>\u003c/p>\n\u003cp>\u003cspan style=\"font-weight: 400\">\u003cb>Morgan Sung: \u003c/b>Before we talk about the game itself, let’s get into the reality that inspired it. And as always, we’re starting by opening a new tab: Life on an H-1B Visa. Last month, the annual Game Developers Conference took over San Francisco. The Asian Art Museum was hosting a showcase for a game that involved chance, timing, and bureaucracy to, “determine who stays and who is deported.” The game was H1B.Life. Azul had heard about the game and decided to check it out. At the event, he talked to a few people about their own experiences with the immigration system. \u003c/span>\u003c/p>\n\u003cp>\u003cb>Azul Dahlstrom-Eckman: \u003c/b>\u003cspan style=\"font-weight: 400\">The first person I talked to, his name was Donduk Dovdon, and he’s an ethnically Mongolian, Chinese national who now is a U.S. Citizen. 
\u003c/span>\u003c/p>\n\u003cp>\u003cb>Donduk Dovdon: \u003c/b>\u003cspan style=\"font-weight: 400\">So I came to States 12 years ago for my master’s degree in Washington, D.C. And eventually I got H-1B, and then later I got green card. \u003c/span>\u003c/p>\n\u003cp>\u003cb>Morgan Sung: \u003c/b>\u003cspan style=\"font-weight: 400\">Donduk said the process demands a lot of sacrifice and that it can be hard to ever feel secure about the future. \u003c/span>\u003c/p>\n\u003cp>\u003cb>Azul Dahlstrom-Eckman: \u003c/b>\u003cspan style=\"font-weight: 400\">He told me it is a very hard and arduous process to get H-1B status. And then even once you have H-1B status, you’re still not secure. You have to work towards getting a green card and then citizenship. \u003c/span>\u003c/p>\n\u003cp>\u003cb>Donduk Dovdon: \u003c/b>\u003cspan style=\"font-weight: 400\">I didn’t see my parents for 10 years. I didn’t see any of my relatives for 10 years. So I think that’s still very emotional for me to say. Like, I eventually went back, I think, two years ago when I became citizen. It was so emotional. And I feel… I miss them, they miss me, a huge chapter of our lives…ten years, like many Americans, it’s just unimaginable. \u003c/span>\u003c/p>\n\u003cp>\u003cb>Morgan Sung: \u003c/b>\u003cspan style=\"font-weight: 400\">Donduk’s 10-year gap without seeing his family may be on the extreme end. H-1B visa holders are technically allowed to travel internationally and re-enter the country, as long as their visa stamp is still valid. But he’s not alone. The decision to stay in the U.S. is often driven by fear of not being allowed back in. Over the last year, given the heightened scrutiny of visa holders and the Trump administration’s immigration crackdown, some legal experts, universities, and even tech companies who employ visa holders have cautioned against international travel. 
\u003c/span>\u003c/p>\n\u003cp>\u003cspan style=\"font-weight: 400\">\u003cb>Morgan Sung: \u003c/b>Concerns about travel aside, taking time off to visit family abroad often depends on your employer’s time off policy. H-1B visas hinge on employment. Changing jobs involves a new sponsor and another mountain of paperwork. Some H- 1B visa holders have spoken out about feeling trapped abusive work environments because of their visa status. \u003c/span>\u003c/p>\n\u003cp>\u003cb>Donduk Dovdon: \u003c/b>\u003cspan style=\"font-weight: 400\">When I was on H-1B, I met various shady employers who technically did not pay me, which was illegal. And it was like, if you dare to report me, you will get your H-1B revoked. So eventually, I was lucky enough and I left that organization, but I heard other people, in order just to get one H-1B, worked for three years free. And after work, they work at a restaurant or bubble tea store illegally for three years. \u003c/span>\u003c/p>\n\u003cp>\u003cb>Morgan Sung: \u003c/b>\u003cspan style=\"font-weight: 400\">Getting picked in the visa lottery doesn’t guarantee long-term stability either. H-1B visas have a 6-year cap and visa holders have to spend a full year outside of the U.S. before they can reapply. Donduk mentioned one of his friends, who’s also Chinese Mongolian. He recently had to say goodbye to her. \u003c/span>\u003c/p>\n\u003cp>\u003cb>Donduk Dovdon: \u003c/b>\u003cspan style=\"font-weight: 400\">She self-deported herself three or four days ago. We were at the airport. She was on H-1B for five years, but no companies was willing to sponsor her green card. She spent 14 years in the States. She even bought a house. And then she sold the house, sold the car, and moved back. \u003c/span>\u003c/p>\n\u003cp>\u003cb>Morgan Sung: \u003c/b>\u003cspan style=\"font-weight: 400\">Visa status can be all consuming. At the showcase, Azul spoke with another attendee who’s currently on a different work-based visa. 
\u003c/span>\u003c/p>\n\u003cp>\u003cb>Azul Dahlstrom-Eckman: \u003c/b>\u003cspan style=\"font-weight: 400\">He said that whenever he meets up with other visa holders, the number one thing that they ask each other is ‘what visa are you on? ‘ because it has such a powerful determining factor over what you do, who you date, where you live, where you work. You have to keep your employer happy and they have to continue to sponsor you. \u003c/span>\u003c/p>\n\u003cp>\u003cspan style=\"font-weight: 400\">\u003cb>Azul Dahlstrom-Eckman: \u003c/b>You might need to take a job that takes you traveling out of the country, but with the Trump administration, you know, maybe it’s hard for you to get back into the country based on your country of origin. So I think people are constantly taking risks and living under uncertainty, you know, from one presidential administration to the next. They’re not sure how these rules surrounding H-1B status are going to change. \u003c/span>\u003c/p>\n\u003cp>\u003cb>Morgan Sung: \u003c/b>\u003cspan style=\"font-weight: 400\">In his reporting, Azul talked to an immigration lawyer based in Silicon Valley, Sophie Alcorn. \u003c/span>\u003c/p>\n\u003cp>\u003cb>Azul Dahlstrom-Eckman: \u003c/b>\u003cspan style=\"font-weight: 400\">The game metaphor made sense to her that the H-1B process is sort of like a game. She said that her two young sons invite her to play video games when she’s home and she says… \u003c/span>\u003c/p>\n\u003cp>\u003cb>Sophie Alcorn: \u003c/b>\u003cspan style=\"font-weight: 400\">You guys, I’m already playing one of the hardest video games. I don’t need to play another game because the immigration system is so complicated as it is. There’s randomness, there’s luck, there’s skill, there is strategy. There’s trying to go around and collect like, badges and items to upskill to be able to get to the next level just like in a game. 
\u003c/span>\u003c/p>\n\u003cp>\u003cb>Azul Dahlstrom-Eckman: \u003c/b>\u003cspan style=\"font-weight: 400\">In games, players are the most affected by the rules, but they also have the least control over them. Right? Players are beholden to the rules but the people that make the rules are not playing the same game. I mean, you could say that we’re now playing on difficulty level hard with the Trump administration. \u003c/span>\u003c/p>\n\u003cp>\u003cb>Morgan Sung: \u003c/b>\u003cspan style=\"font-weight: 400\">Okay, let’s talk about the game itself, H1B life. Donduk, the guy who just got his American citizenship, actually thought the prototype he played was too realistic. \u003c/span>\u003c/p>\n\u003cp>\u003cb>Azul Dahlstrom-Eckman: \u003c/b>\u003cspan style=\"font-weight: 400\">He said that the gameplay was a little triggering for him, it was too real. \u003c/span>\u003c/p>\n\u003cp>\u003cb>Morgan Sung: \u003c/b>\u003cspan style=\"font-weight: 400\">We’re going to get into that after this break. But first, we wanted to remind you that Close All Tabs depends on listeners like you to keep us going. You can support us by becoming a member at donate.kqed.org/podcasts. Okay, more about the game after the break. Stick with us. \u003c/span>\u003c/p>\n\u003cp>\u003cspan style=\"font-weight: 400\">\u003cb>Morgan Sung: \u003c/b>Welcome back. We’re getting into this game, H1B.Life. Who is it for? How does it work? And can it really help make sense of the immigration process? Let’s open a new tab: How to play the H1-B Visa game. \u003c/span>\u003c/p>\n\u003cp>\u003cspan style=\"font-weight: 400\">\u003cb>Morgan Sung: \u003c/b>The immigration system, at times, can seem like a black box to applicants, lawyers, and maybe most of all, to natural born American citizens who’ve never needed to think about this. 
The seemingly arbitrary rules that can change at the whims of an unseen entity, the gravity of every single decision, the pressure to succeed as the perfect model immigrant, that is the experience that developers are trying to capture in H1B.Life. \u003c/span>\u003c/p>\n\u003cp>\u003ci>\u003cspan style=\"font-weight: 400\">[Audio from H1B.Life Trailer] America the big and beautiful country, but you need a visa. Be talented, big brain, build chips, then you might get an H-1B visa .\u003c/span>\u003c/i>\u003c/p>\n\u003cp>\u003cb>Morgan Sung: \u003c/b>\u003cspan style=\"font-weight: 400\">That’s a trailer for H1B.Life, which is still a very long way from being playable. The Kickstarter hasn’t even launched yet, but Azul got to try a demo. \u003c/span>\u003c/p>\n\u003cp>\u003cb>Azul Dahlstrom-Eckman: \u003c/b>\u003cspan style=\"font-weight: 400\">It’s basically like this text-based decision tree. The top half of the screen is like a text prompt and the bottom half is like, a series of choices. And then as you play through the game, you select different choices. There are these like four core attributes that you need to maintain. It’s intelligence, wealth, social support, and burnout rate, right? So this kind of like simulating. The things that it takes to be a person going through the H-1B visa process. \u003c/span>\u003c/p>\n\u003cp>\u003ci>\u003cspan style=\"font-weight: 400\">[Audio from H1B.Life Trailer] You make smart choices to get that visa and stay. What separates winning from losing is how you react when fate happens. \u003c/span>\u003c/i>\u003c/p>\n\u003cp>\u003cb>Azul Dahlstrom-Eckman: \u003c/b>\u003cspan style=\"font-weight: 400\">As you play, you’re presented with these different choices, like you’re done with your study abroad program and you go back home to Shanghai and you want to pursue journalism. But maybe you can get a job in this field and get an H-1B visa, so you decide to put off your dream and pursue something else. 
And as you do that, your core attributes sort of change. And so you’re having to sort of weigh those choices. \u003c/span>\u003cspan style=\"font-weight: 400\">You can spend social capital, instead of going to, you know, your friend’s birthday party, you stay late at work because, you know, you’re trying to get sponsored, right? So your social support goes down, but you know your intelligence goes up, right? Like, that’s kind of the balancing act. Then what’s promised in subsequent versions of the game is that if those core attributes run out, it triggers a sort of like slot machine feature where different gods decide players fates, and that’s sort of supposed to describe this random nature of the H-1B visa process. \u003c/span>\u003c/p>\n\u003cp>\u003ci>\u003cspan style=\"font-weight: 400\">[Audio from H1B.Life Trailer] \u003c/span>\u003c/i>\u003ci>\u003cspan style=\"font-weight: 400\">\u003cbr>\n\u003c/span>\u003c/i>\u003ci>\u003cspan style=\"font-weight: 400\">And don’t forget the immigration gods: code god, free god, fried chicken god, even orange god.\u003c/span>\u003c/i>\u003cspan style=\"font-weight: 400\"> \u003c/span>\u003c/p>\n\u003cp>\u003cb>Morgan Sung: \u003c/b>\u003cspan style=\"font-weight: 400\">Can you tell us more about these different gods in the game? I know there’s one called the orange god. \u003c/span>\u003c/p>\n\u003cp>\u003cb>Azul Dahlstrom-Eckman: \u003c/b>\u003cspan style=\"font-weight: 400\">So the orange god is the one that caught my eye. The orange god bears a very strong resemblance to Donald Trump. And the orange god claims to control everything and has already changed the policy 500 times before you finish reading the sentence. That’s what the description of the orange is. \u003c/span>\u003c/p>\n\u003cp>\u003cb>Allison Yang: \u003c/b>\u003cspan style=\"font-weight: 400\">He’s the newest God in this universe. He’s very powerful. He can destroy your life any minute he wants, and he usually do. So you have to be very careful of him. 
\u003c/span>\u003c/p>\n\u003cp>\u003cb>Morgan Sung: \u003c/b>\u003cspan style=\"font-weight: 400\">That’s Allison Yang, the founder of the game studio, Reality Reload. She told Azul about the other gods in the game. So there’s the code god, who looks like a cyborg and is obsessed with tech and optimization. The free god resembles the Statue of Liberty and is supposed to represent the American dream of a free society. And the fried chicken god? A nod to the annual Chick-fil-A tradition. And that god…\u003c/span>\u003c/p>\n\u003cp>\u003cb>Azul Dahlstrom-Eckman: \u003c/b>\u003cspan style=\"font-weight: 400\">…According to the game description, keeps you surviving through the power of fast food grease. \u003c/span>\u003c/p>\n\u003cp>\u003cb>Morgan Sung: \u003c/b>\u003cspan style=\"font-weight: 400\">[Laughter] Reality Reload is a game studio made up of immigrants, developers, designers, and journalists. The founder, Allison, has a background in journalism. \u003c/span>\u003c/p>\n\u003cp>\u003cb>Allison Yang: \u003c/b>\u003cspan style=\"font-weight: 400\">I love journalism, but over the years, as an editor and a reporter, I realized less and less people are reading long form, but there’s so much stories and information we want to pass on. I had the luck to step into the game industry for 7-8 years now, and I realized it’s the opposite. Like, people spend a massive amount of time in the game. They complained one of my games to be too short, play is two hours. At the same time, they would complain that one of my articles is too long. So I thought, what if I turn it around, like a Trojan horse. Like we wrap the news or information into a game and people doesn’t have to know that. They’re just playing something fun and they’re exposed to information anyway. \u003c/span>\u003c/p>\n\u003cp>\u003cb>Morgan Sung: \u003c/b>\u003cspan style=\"font-weight: 400\">So, as Allison told Azul, the point of H1B.Life is to educate people about the complexities of the immigration system. 
\u003c/span>\u003c/p>\n\u003cp>\u003cb>Azul Dahlstrom-Eckman: \u003c/b>\u003cspan style=\"font-weight: 400\">I think they started with the H-1B visa because it’s like this caricature of the visa system. It’s highly sought after. It’s very competitive, but they realized that it’s not just H-1B visas, it’s the whole United States Immigration System. \u003c/span>\u003c/p>\n\u003cp>\u003cb>Morgan Sung: \u003c/b>\u003cspan style=\"font-weight: 400\">The Reality Reload team initially designed the game based on their own experiences as Chinese immigrants in Silicon Valley. They conducted dozens of interviews with other Chinese immigrants for storylines in the game. But the team quickly realized that this experience is more universal than they first believed. \u003c/span>\u003c/p>\n\u003cp>\u003cb>Allison Yang: \u003c/b>\u003cspan style=\"font-weight: 400\">And it’s kind of funny because there is internal tensions between people of different origin who are competing for the same visa. But when we were talking to them, we realized it’s the same rat race, and you’re competing with each other not because the other party is evil or better, it’s because you have to. And then everyone’s, or every context, country of origin have their own dilemma. Like when we talk to Latino people from Bolivia…here it’s already very hard to find a job, but people at home find it very difficult to believe they couldn’t find money in the States. They have to mitigate through that. \u003c/span>\u003cspan style=\"font-weight: 400\">\u003cbr>\n\u003c/span>\u003cspan style=\"font-weight: 400\">I talked to my physician who is Indian, and she said their problem is even if they get a visa, there are too many Indian visa holders, they have to wait, I’m sure this number is not right, but she said 100 years to get a green card. So it’s like different versions of a game. 
\u003c/span>\u003c/p>\n\u003cp>\u003cb>Morgan Sung: \u003c/b>\u003cspan style=\"font-weight: 400\">H1B.Life revolves around choices and rules, which the player may or may not know about until they break one. \u003c/span>\u003c/p>\n\u003cp>\u003cb>Allison Yang: \u003c/b>\u003cspan style=\"font-weight: 400\">And the rules are changing every day. The player usually the one who has the least power or say, but they are the one we have to play through. So that tension is something we want to focus on. \u003c/span>\u003c/p>\n\u003cp>\u003cb>Morgan Sung: \u003c/b>\u003cspan style=\"font-weight: 400\">Allison did admit that early versions of the game were maybe too realistic. She told Azul that when they ran play tests, some people, like Donduk, found it a bit traumatic because they’ve dealt with this in real life. Donduk thought the game was triggering and not playful enough for a typical video game. Here’s Azul again. \u003c/span>\u003c/p>\n\u003cp>\u003cb>Azul Dahlstrom-Eckman: \u003c/b>\u003cspan style=\"font-weight: 400\">But he did think that it could have an application in like corporate diversity trainings. You could imagine like being at Google and a lot of your coworkers are H1B sponsors, being like, wow, I didn’t know that you had to go through that to get here, you know? And then that’s so different from how a United States citizen would get here. So that’s like, one potential application. \u003c/span>\u003c/p>\n\u003cp>\u003cb>Morgan Sung: \u003c/b>\u003cspan style=\"font-weight: 400\">H1B.Life is still a prototype. The Reality Reload team is still interviewing other immigrants to weave their experiences into the story. And they plan to add more fantasy and play to the game before it launches. Azul mentioned one mini game in the works, which involves juggling. Your hands are full with a social life, maintaining grades, and looking for a job that’ll sponsor you, all while checking emails from your immigration lawyer. 
This is core to the game, managing the tension between competing priorities. It prompts players to consider what they want more: to pursue their dreams, or to fit the mold of an ideal immigrant in order to stay in the country. \u003c/span>\u003c/p>\n\u003cp>\u003cb>Allison Yang: \u003c/b>\u003cspan style=\"font-weight: 400\">At the beginning we thought we were going to do a simple visa simulation game and now we realize it’s more about how people figure out what kind of life they want, where they want it to be. \u003c/span>\u003c/p>\n\u003cp>\u003cb>Morgan Sung: \u003c/b>\u003cspan style=\"font-weight: 400\">Which is also a sentiment that almost all employment-based visa holders have to consider. Except, unlike in the game, there’s no decision tree guiding their path. They have to make these choices for themselves. What does life look like when it’s not dictated by a precarious visa status? Let’s open another new tab: the post-visa midlife crisis. \u003c/span>\u003c/p>\n\u003cp>\u003cb>Azul Dahlstrom-Eckman: \u003c/b>\u003cspan style=\"font-weight: 400\">For Donduk Dovdon, he came here as a student and he was pursuing a master’s degree. And now he needs to decide where he’s going to work, not based on what he wants to do, but based on who will sponsor him for an H-1B visa. After you get an H1B Visa, then you’re on a six-year timeline where you have to hit certain benchmarks in order to get a green card. After 10 years of uncertain visa status…once he got his U.S. citizenship, he basically had like a midlife crisis. \u003c/span>\u003c/p>\n\u003cp>\u003cb>Donduk Dovdon: \u003c/b>\u003cspan style=\"font-weight: 400\">Because before, when I was on H1B, the only thing I had, my goal was to survive and be in this country. So I do whatever it takes to get a job that sponsors me for H-1B. And I do whatever it takes make my boss happy. 
But when I eventually got a green card, I finally had the privilege to think like an American, like, oh, what do I actually want to do with my life? I think now I’m still figuring out like what do I actually want to do? Now I’m like a 21 years old American, just graduated from college. I have all the opportunities finally opened up for me and I remember when I became citizen and I decided to quit PhD that was the hardest time in my life because like I’m like now finally I can move to anywhere in the States. I can be a bartender in Miami, but do I really want to be? \u003c/span>\u003cspan style=\"font-weight: 400\">\u003cbr>\n\u003c/span>\u003c/p>\n\u003cp>\u003cspan style=\"font-weight: 400\">\u003cb>Donduk Dovdon: \u003c/b>I don’t know. Like I spent two months wondering where should I go next. And I know some other also H-1B workers, they were like of the best coders as a company or program manager or whatever. And then when they got a green card, some guys, I know one guy, he quit and he moved to Midwest and he opened a bakery because that’s what he actually is passionate about. And I think it’s just, it’s like, finally as immigrants, like when we got our green card of citizenship, we finally have the privilege to ponder what Americans did probably around 18 years old, or early 20s, like what do you actually want to achieve? \u003c/span>\u003c/p>\n\u003cp>\u003cb>Morgan Sung: \u003c/b>\u003cspan style=\"font-weight: 400\">What do I really want to do with my life? It’s a conundrum that American citizens can ponder at any age, but most consider it when they’re teenagers or fresh out of college. Maybe a couple years into your career, you realize that it’s not for you and you can pivot. But if your legal status in this country hinges on being able to do one highly specialized job, you don’t get to pivot. 
\u003c/span>\u003c/p>\n\u003cp>\u003cb>Azul Dahlstrom-Eckman: \u003c/b>\u003cspan style=\"font-weight: 400\">Your whole reason for being in the country is holding these special degrees, this special job and at the end of the day, that’s not all anyone is. Nobody is just an H-1B visa holder. Like, they’re complex people with multitudes of desires and I think feeling the weight of that lift can be unsettling for people. \u003c/span>\u003c/p>\n\u003cp>\u003cb>Morgan Sung: \u003c/b>\u003cspan style=\"font-weight: 400\">Donduk, for one, questioned whether this pressure is worth it for everyone. He told Azul that he was glad to stay in the United States, which, despite everything, is a safer and more free place for him as an openly gay, ethnically Mongolian person. But, if he knew that he could live in China without fear of persecution… \u003c/span>\u003c/p>\n\u003cp>\u003cb>Donduk Dovdon: \u003c/b>\u003cspan style=\"font-weight: 400\">I think the U.S. is getting harder and harder to stay here for immigrants. Like, you have to evaluate, what do you value more. If you can live a comfortable life in your back home country and you value your family connections, do you really want to spend 10, 15 years here just working like a dog to get a green card here? And also we heard many other stories like some immigrants, eventually they moved to Singapore or Canada and they found happiness there. Or even some move to Africa. It’s not like U.S. is the only place you can be happy. \u003c/span>\u003c/p>\n\u003cp>\u003cb>Morgan Sung: \u003c/b>\u003cspan style=\"font-weight: 400\">Fewer international students are interested in studying in the U.S. Last year, international enrollment in American universities dropped 17%. Both Texas and Florida have banned H-1B hiring at public universities. Many scientists have raised concerns that the U.S. will lose its competitive edge in research between DOGE enforced funding cuts and H- 1B hiring freezes. 
But, as for working in the U.S., outside of academia? \u003c/span>\u003c/p>\n\u003cp>\u003cb>Azul Dahlstrom-Eckman: \u003c/b>\u003cspan style=\"font-weight: 400\">U.S. Citizen and Immigration Services said that they did hit their cap this year. So obviously there is still a demand for H-1B visas, but the Trump administration has made it a lot harder to get an H-1B visa. A lot of the Reality Reload team are Chinese immigrants and coming here, they’ve had the same experiences as the people they interviewed for these stories. I think it is very personal and part of why they wanted to give a voice to this experience because it is so pervasive in, you know, especially in like, the Bay area or other places where there’s a high need for specialized immigrant labor. This is really a huge thing and I think it’s not something that a lot of American citizens are aware of. \u003c/span>\u003c/p>\n\u003cp>\u003cb>Morgan Sung: \u003c/b>\u003cspan style=\"font-weight: 400\">What do you think the game says about the intersection of technology and very bureaucratic systems like the immigration process? \u003c/span>\u003c/p>\n\u003cp>\u003cb>Azul Dahlstrom-Eckman: \u003c/b>\u003cspan style=\"font-weight: 400\">Immigrants feel the whiplash of American government policies from like, Democrat to Republican, maybe more than most groups here in the country, and how it can upend their lives. And so I think this is a way for immigrants to tell their experiences and for them to feel seen and maybe to inject a little bit of critique into real life. Sometimes, like the experience can feel so arbitrary or so gamified. So maybe a game is the best way to understand and work towards making these processes better, or at least like, explain them. \u003c/span>\u003c/p>\n\u003cp>\u003cb>Morgan Sung: \u003c/b>\u003cspan style=\"font-weight: 400\">I have the privilege of being a natural-born U.S. Citizen. 
Both of my parents immigrated here when they were young, and I’ve never had to navigate the complexities of the immigration system myself. Many of my close family members have dealt with that, but I admit that even as a journalist, I struggle to differentiate between types of visas and what you can do with them: H-1B, OPT, EB-3, O-1, L-1B, K-1? It’s a dialect that’s unintelligible to a lot of natural born citizens. There’s a whole other world of paperwork and red tape that most Americans never have to think about. But that doesn’t mean it’s unimportant. So how do you get through the doom scrolling and get American citizens to understand the real life impact of these shifting immigration policies?\u003c/span>\u003cspan style=\"font-weight: 400\">\u003cbr>\n\u003c/span>\u003c/p>\n\u003cp>\u003cspan style=\"font-weight: 400\">\u003cb>Morgan Sung: \u003c/b>Like Allison pointed out, people who aren’t inclined to spend 20 minutes reading about visa changes, may be more convinced to spend 20 minutes in a game, trying to avoid the wrath of the orange god. Through surreal slot machines, fickle deities, and some skill juggling, games like H1B.Life can open players up to an unseen reality that exists right in front of them, one that might affect their friends, their coworkers, and their neighbors every day. \u003c/span>\u003c/p>\n\u003cp>\u003cspan style=\"font-weight: 400\">\u003cb>Morgan Sung: \u003c/b>Special thanks to Azul Dahlstrom-Eckman for sharing this story along with the interview recordings you heard today. You can find a link to Azul’s story and more about H-1B.Life and the immigrant experience in our show notes. \u003c/span>\u003cspan style=\"font-weight: 400\">Okay, let’s close all these tabs. \u003c/span>\u003c/p>\n\u003cp>\u003cspan style=\"font-weight: 400\">\u003cb>Morgan Sung: \u003c/b>Close All Tabs is a production of KQED Studios and is reported and hosted by me, Morgan Sung. 
This episode was produced by Maya Cueva and edited by Chris Egusa, who also composed our theme song and credits music. The Close All Tabs team also includes editor Chris Hambrick and audio engineer, Brendan Willard. Additional music by APM. Audience engagement support from Maha Sanad. Jen Chien is our director of podcasts and Ethan Toven-Lindsey is our editor in chief. Some members of the KQED podcast team are represented by the Screen Actors Guild, American Federation of Television and Radio Artists, San Francisco Northern California Local. This episode’s keyboard sounds were submitted by Alex Tran, and recorded on his white Epomaker Hi75 keyboard with Fogruaden red samurai keycaps and gateron milky yellow pro v2 switches. Thanks for listening. \u003c/span>\u003c/p>\n\u003cp> \u003c/p>\n\u003cp>[ad floatright]\u003c/p>\n",
"blocks": [],
"excerpt": "A new gaming studio, Reality Reload, is trying to capture the H1-B Visa experience…in a mobile game. ",
"status": "publish",
"parent": 0,
"modified": 1776830876,
"stats": {
"hasAudio": true,
"hasVideo": false,
"hasChartOrMap": false,
"iframeSrcs": [],
"hasGoogleForm": false,
"hasGallery": false,
"hasHearkenModule": false,
"hasPolis": false,
"paragraphCount": 84,
"wordCount": 5630
},
"headData": {
"title": "The H-1B Visa Process But Make It a Video Game | KQED",
"description": "Life on an H-1B visa — a visa that lets U.S. companies hire foreign-born workers for specialized jobs — is difficult, unpredictable, and has gotten even harder under the Trump administration. A new gaming studio, Reality Reload, is trying to capture that experience in a mobile game. It’s called H1B.Life, and it simulates the difficult choices, competing priorities, and personal sacrifices visa holders face — complete with chaotic design elements, like all-powerful “gods” who control your fate. KQED reporter Azul Dahlstrom-Eckman joins Morgan to break down the game’s surprising design choices, the mission behind it, and the stories he heard from people navigating the H1-B process.",
"ogTitle": "",
"ogDescription": "",
"ogImgId": "",
"twTitle": "",
"twDescription": "",
"twImgId": "",
"socialDescription": "Life on an H-1B visa — a visa that lets U.S. companies hire foreign-born workers for specialized jobs — is difficult, unpredictable, and has gotten even harder under the Trump administration. A new gaming studio, Reality Reload, is trying to capture that experience in a mobile game. It’s called H1B.Life, and it simulates the difficult choices, competing priorities, and personal sacrifices visa holders face — complete with chaotic design elements, like all-powerful “gods” who control your fate. KQED reporter Azul Dahlstrom-Eckman joins Morgan to break down the game’s surprising design choices, the mission behind it, and the stories he heard from people navigating the H1-B process.",
"schema": {
"@context": "https://schema.org",
"@type": "NewsArticle",
"headline": "The H-1B Visa Process But Make It a Video Game",
"datePublished": "2026-04-22T03:00:22-07:00",
"dateModified": "2026-04-21T21:07:56-07:00",
"image": "https://cdn.kqed.org/wp-content/uploads/2020/02/KQED-OG-Image@1x.png",
"isAccessibleForFree": "True",
"publisher": {
"@type": "NewsMediaOrganization",
"@id": "https://www.kqed.org/#organization",
"name": "KQED",
"logo": "https://cdn.kqed.org/wp-content/uploads/2020/02/KQED-OG-Image@1x.png",
"url": "https://www.kqed.org",
"sameAs": [
"https://www.facebook.com/KQED",
"https://twitter.com/KQED",
"https://www.instagram.com/kqed/",
"https://www.tiktok.com/@kqedofficial",
"https://www.linkedin.com/company/kqed",
"https://www.youtube.com/channel/UCeC0IOo7i1P_61zVUWbJ4nw"
]
}
}
},
"primaryCategory": {
"termId": 33520,
"slug": "podcast",
"name": "Podcast"
},
"source": "Close All Tabs",
"sourceUrl": "https://www.kqed.org/podcasts/closealltabs",
"audioUrl": "https://traffic.megaphone.fm/KQINC2401184331.mp3?updated=1776830126",
"sticky": false,
"templateType": "standard",
"featuredImageType": "standard",
"excludeFromSiteSearch": "Include",
"articleAge": "0",
"path": "/news/12080824/the-h-1b-visa-process-but-make-it-a-video-game",
"audioTrackLength": null,
"parsedContent": [
{
"type": "contentString",
"content": "\u003cdiv class=\"post-body\">\u003cp>\u003cp>\u003ca href=\"#episode-transcript\">\u003ci>View the full episode transcript.\u003c/i>\u003c/a>\u003c/p>\n\u003cp>\u003cspan style=\"font-weight: 400\">Life on an H-1B visa — a visa that lets U.S. companies hire foreign-born workers for specialized jobs — is difficult, unpredictable, and has gotten even harder under the Trump administration. A new gaming studio, Reality Reload, is trying to capture that experience in a mobile game. It’s called H1B.Life, and it simulates the difficult choices, competing priorities, and personal sacrifices visa holders face — complete with chaotic design elements, like all-powerful “gods” who control your fate.\u003c/span>\u003c/p>\n\u003cp>\u003cspan style=\"font-weight: 400\">KQED reporter Azul Dahlstrom-Eckman joins Morgan to break down the game’s surprising design choices, the mission behind it, and the stories he heard from people navigating the H1-B process. \u003c/span>\u003c/p>\n\u003cp>\u003c!-- iframe plugin v.4.3 wordpress.org/plugins/iframe/ -->\u003cbr>\n\u003ciframe loading=\"lazy\" frameborder=\"0\" height=\"200\" scrolling=\"no\" src=\"https://playlist.megaphone.fm?e=KQINC2401184331\" width=\"100%\" class=\"iframe-class\">\u003c/iframe>\u003c/p>\n\u003cp>\u003cstrong>Guest:\u003c/strong>\u003c/p>\n\u003cul>\n\u003cli>\u003ca href=\"https://www.kqed.org/author/adahlstromeckman\">\u003cspan style=\"font-weight: 400\">Azul Dahlstrom-Eckman\u003c/span>\u003c/a>\u003cspan style=\"font-weight: 400\">, reporter at \u003c/span>\u003ci>\u003cspan style=\"font-weight: 400\">KQED\u003c/span>\u003c/i>\u003c/li>\n\u003c/ul>\n\u003cp>\u003cb>Further Reading/Listening:\u003c/b>\u003c/p>\n\u003cul>\n\u003cli>\u003ca href=\"https://www.kqed.org/news/12076756/what-does-it-take-to-get-a-h-1b-visa-this-video-game-shows-just-how-complicated-it-is\">\u003cspan style=\"font-weight: 400\">What Does It Take to Get a H-1B Visa? 
This Video Game Shows Just How Complicated It Is \u003c/span>\u003c/a>\u003cspan style=\"font-weight: 400\">— Azul Dahlstrom-Eckman, \u003c/span>\u003ci>\u003cspan style=\"font-weight: 400\">KQED\u003c/span>\u003c/i>\u003c/li>\n\u003cli>\u003ci>\u003c/i>\u003ca href=\"https://www.businessinsider.com/meta-google-amazon-microsoft-h-1b-visa-applications-decline-2026-4\">Meta, Google, and Amazon slash H-1B petitions after Trump’s visa crackdown\u003c/a> — Geoff Weiss, Melia Russell, Andy Kiersz, and Alex Nicoll, \u003ci>Business Insider \u003c/i>\u003c/li>\n\u003cli>\u003ca href=\"https://www.insidehighered.com/news/government/state-policy/2026/01/29/faculty-warn-against-state-bans-h-1b-visas\">Faculty Warn Against State Bans on H-1B Visas\u003c/a> — Jessica Blake, \u003ci>\u003ci>Inside Higher Ed \u003c/i>\u003c/i>\u003c/li>\n\u003cli>\u003ca href=\"https://www.the-scientist.com/h-1b-visa-restrictions-will-hurt-america-s-research-potential-experts-say-74267\">H-1B Visa Restrictions Will Hurt America’s Research Potential, Experts Say\u003c/a> — Shelby Bradford, PhD, \u003ci>\u003ci>\u003ci>The Scientist \u003c/i>\u003c/i>\u003c/i>\u003c/li>\n\u003cli>\u003ca href=\"https://www.wired.com/story/trump-immigration-visa-secrutiny-tech/\">US Tech Visa Applications Are Being Put Through the Wringer \u003c/a>— Lauren Goode, \u003ci>\u003ci>\u003ci>\u003ci>Wired \u003c/i>\u003c/i>\u003c/i>\u003c/i>\u003c/li>\n\u003cli>\u003ca href=\"https://www.wired.com/story/made-in-china-a-new-game-turns-the-h-1b-visa-system-into-a-surreal-simulation/\">\u003cspan style=\"font-weight: 400\">A New Game Turns the H-1B Visa System Into a Surreal Simulation \u003c/span>\u003c/a>\u003cspan style=\"font-weight: 400\">— Zeyi Yang, \u003c/span>\u003ci>\u003cspan style=\"font-weight: 400\">Wired \u003c/span>\u003c/i>\u003c/li>\n\u003c/ul>\n\u003cp>Want to give us feedback on the show? 
Shoot us an email at \u003ca href=\"mailto:CloseAllTabs@KQED.org\">CloseAllTabs@KQED.org\u003c/a>\u003c/p>\n\u003cp>\u003cspan style=\"font-weight: 400\">Follow us on\u003c/span>\u003ca href=\"https://www.instagram.com/closealltabspod/\"> \u003cspan style=\"font-weight: 400\">Instagram\u003c/span>\u003c/a>\u003cspan style=\"font-weight: 400\"> and\u003c/span>\u003ca href=\"https://www.tiktok.com/@closealltabs\"> \u003cspan style=\"font-weight: 400\">TikTok\u003c/span>\u003c/a>\u003c/p>\n\u003cp>\u003c/p>\u003c/div>",
"attributes": {
"named": {},
"numeric": []
}
},
{
"type": "component",
"content": "",
"name": "ad",
"attributes": {
"named": {
"label": "fullwidth"
},
"numeric": [
"fullwidth"
]
}
},
{
"type": "contentString",
"content": "\u003cdiv class=\"post-body\">\u003c/div>",
"attributes": {
"named": {},
"numeric": []
}
},
{
"type": "contentString",
"content": "\u003cdiv class=\"post-content post-body\">\u003ch2 id=\"episode-transcript\">Episode Transcript\u003c/h2>\n\u003cp>\u003ci>\u003cspan style=\"font-weight: 400\">This is a computer-generated transcript. While our team has reviewed it, there may be errors.\u003c/span>\u003c/i>\u003c/p>\n\u003cp>\u003cb>Morgan Sung: \u003c/b>\u003cspan style=\"font-weight: 400\">Hi, it’s Morgan. Be honest with me, how many tabs do you have open? Feeling a little overwhelmed by closing them? Well, we have an episode for you. If you like our deep dives and wanna hear more, please rate and review us on Spotify, Apple Podcasts, or wherever you listen to the show and tell your friends about us too. Okay, let’s get to the episode. \u003c/span>\u003c/p>\n\u003cp>\u003cb>Morgan Sung: \u003c/b>\u003cspan style=\"font-weight: 400\">Quick note: in this episode we use the term “immigrant” in a broad sense to refer to people living and working in the U.S. on H-1B visas. In legal terms the H1-B is a nonimmigrant temporary visa, though many visa holders hope to stay in the U.S. long term. \u003c/span>\u003cspan style=\"font-weight: 400\">Every March, a corner of the Chinese social media app Red Note gets flooded with posts about Chick-fil-A. The users go all out, buying Chick-fil-A keychains, changing their profile pictures to the red and white chicken logo, and of course, treating themselves to a hearty meal of a chicken sandwich and waffle fries. You’ll often see the same emojis in each post: prayer hands, a chicken, and an American flag. \u003c/span>\u003cspan style=\"font-weight: 400\">All of these users are applying for the H-1B visa, a visa for highly skilled immigrants sponsored by an employer. Many come to Silicon Valley to work in tech. Hundreds of thousands of hopefuls apply every year, but only 85,000 applicants are selected. So, what does this have to do with fried chicken? 
\u003c/span>\u003c/p>\n\u003cp>\u003cb>Azul Dahlstrom-Eckman: \u003c/b>\u003cspan style=\"font-weight: 400\">It turns out that there’s a tradition amongst Chinese H-1B applicants here in the United States that they believe that eating a lot of Chick-fil-A and just generally associating with Chick-fil-A brings you luck and will increase your chances of getting selected in the H-1B lottery. \u003c/span>\u003c/p>\n\u003cp>\u003cb>Morgan Sung: \u003c/b>\u003cspan style=\"font-weight: 400\">Azul Dahlstrom-Eckman is a reporter at KQED. \u003c/span>\u003c/p>\n\u003cp>\u003cb>Azul Dahlstrom-Eckman: \u003c/b>\u003cspan style=\"font-weight: 400\">And apparently, if you go to a Chick-fil-A here in the Bay Area around March, you’re likely to see a lot of Chinese immigrants who potentially could be H-1B applicants eating there. And it’s not really clear where this started, but it’s definitely a thing. \u003c/span>\u003c/p>\n\u003cp>\u003cb>Morgan Sung: \u003c/b>\u003cspan style=\"font-weight: 400\">Azul said that this trend, the annual Chick-fil-A frenzy on social media, is part of a much bigger story. To even apply for an H-1B visa, you need an employer to sponsor you, which means that you need to have a job offer. It doesn’t guarantee a visa, just that you can enter the lottery. The process for getting an H-1B Visa has been changing, and a system that was already difficult has become even harder for applicants. \u003c/span>\u003c/p>\n\u003cp>\u003cb>Azul Dahlstrom-Eckman: \u003c/b>\u003cspan style=\"font-weight: 400\">It is true that with the Trump administration, there has been a lot of changes specifically to this year’s H-1B visa process. The first is that there’s now a $100,000 fee if a company wants to sponsor somebody who isn’t currently living in the country. 
\u003c/span>\u003c/p>\n\u003cp>\u003cb>Morgan Sung: \u003c/b>\u003cspan style=\"font-weight: 400\">In wake of the changes and very steep application fee, some universities and companies implemented a hiring freeze for H-1B applicants. And the ones that are still hiring are sponsoring far fewer visas than in previous cycles. The updated application system isn’t totally random anymore. Higher paid applicants have a better chance of being picked now. But for the most part, the application process feels like a game of luck. At the end of the day, it’s still a lottery. Which is why good luck traditions, like getting Chick-fil-A during the registration window, have become baked into the modern mythology of the American immigrant experience. \u003c/span>\u003cspan style=\"font-weight: 400\">It can all feel like a game, one in which the rules seem arbitrary and unpredictable. So when Azul heard about a studio turning that experience into a playable app, it made perfect sense. \u003c/span>\u003c/p>\n\u003cp>\u003cb>Azul Dahlstrom-Eckman: \u003c/b>\u003cspan style=\"font-weight: 400\">H1B.Life is a game that tries to simulate the experience of an immigrant who’s trying to get H1-B visa status. And it’s a pretty early prototype now. Basically, it’s sort of like a text-based decision tree on your smartphone. And I played a demo of it, and it was actually kind of interesting. \u003c/span>\u003cspan style=\"font-weight: 400\">So one of the opening scenes of the game says, during high school, you spent hours and hours on your laptop binging Gilmore Girls on shady, unauthorized streaming websites. Everything in your drowsy new town reminds you of the show. If it wasn’t for Lorelai and Rory, you might have never decided to… and then there’s like two decisions, and one is study journalism or come to New England. And I was like, wow, that’s really oddly specific. 
And it turns out that H1B.Life is based on real-life interviews from H1-B applicants, specifically Chinese immigrants living in Silicon Valley. \u003c/span>\u003c/p>\n\u003cp>\u003cb>Morgan Sung: \u003c/b>\u003cspan style=\"font-weight: 400\">We’re diving into H1B.Life today, the arduous application process, how capricious policy changes impact the trajectory of an immigrant’s entire life, and the cost of chasing a dream, all wrapped up in a mobile game. \u003c/span>\u003c/p>\n\u003cp>\u003cspan style=\"font-weight: 400\">\u003cb>Morgan Sung: \u003c/b>This is Close All Tabs. I’m Morgan Sung, tech journalist and your chronically online friend, here to open as many browser tabs as it takes to help you understand how the digital world affects our real lives. Let’s get into it. \u003c/span>\u003c/p>\n\u003cp>\u003cspan style=\"font-weight: 400\">\u003cb>Morgan Sung: \u003c/b>Before we talk about the game itself, let’s get into the reality that inspired it. And as always, we’re starting by opening a new tab: Life on an H-1B Visa. Last month, the annual Game Developers Conference took over San Francisco. The Asian Art Museum was hosting a showcase for a game that involved chance, timing, and bureaucracy to, “determine who stays and who is deported.” The game was H1B.Life. Azul had heard about the game and decided to check it out. At the event, he talked to a few people about their own experiences with the immigration system. \u003c/span>\u003c/p>\n\u003cp>\u003cb>Azul Dahlstrom-Eckman: \u003c/b>\u003cspan style=\"font-weight: 400\">The first person I talked to, his name was Donduk Dovdon, and he’s an ethnically Mongolian, Chinese national who now is a U.S. Citizen. \u003c/span>\u003c/p>\n\u003cp>\u003cb>Donduk Dovdon: \u003c/b>\u003cspan style=\"font-weight: 400\">So I came to States 12 years ago for my master’s degree in Washington, D.C. And eventually I got H-1B, and then later I got green card. 
\u003c/span>\u003c/p>\n\u003cp>\u003cb>Morgan Sung: \u003c/b>\u003cspan style=\"font-weight: 400\">Donduk said the process demands a lot of sacrifice and that it can be hard to ever feel secure about the future. \u003c/span>\u003c/p>\n\u003cp>\u003cb>Azul Dahlstrom-Eckman: \u003c/b>\u003cspan style=\"font-weight: 400\">He told me it is a very hard and arduous process to get H-1B status. And then even once you have H-1B status, you’re still not secure. You have to work towards getting a green card and then citizenship. \u003c/span>\u003c/p>\n\u003cp>\u003cb>Donduk Dovdon: \u003c/b>\u003cspan style=\"font-weight: 400\">I didn’t see my parents for 10 years. I didn’t see any of my relatives for 10 years. So I think that’s still very emotional for me to say. Like, I eventually went back, I think, two years ago when I became citizen. It was so emotional. And I feel… I miss them, they miss me, a huge chapter of our lives…ten years, like many Americans, it’s just unimaginable. \u003c/span>\u003c/p>\n\u003cp>\u003cb>Morgan Sung: \u003c/b>\u003cspan style=\"font-weight: 400\">Donduk’s 10-year gap without seeing his family may be on the extreme end. H-1B visa holders are technically allowed to travel internationally and re-enter the country, as long as their visa stamp is still valid. But he’s not alone. The decision to stay in the U.S. is often driven by fear of not being allowed back in. Over the last year, given the heightened scrutiny of visa holders and the Trump administration’s immigration crackdown, some legal experts, universities, and even tech companies who employ visa holders have cautioned against international travel. \u003c/span>\u003c/p>\n\u003cp>\u003cspan style=\"font-weight: 400\">\u003cb>Morgan Sung: \u003c/b>Concerns about travel aside, taking time off to visit family abroad often depends on your employer’s time off policy. H-1B visas hinge on employment. Changing jobs involves a new sponsor and another mountain of paperwork. 
Some H-1B visa holders have spoken out about feeling trapped in abusive work environments because of their visa status.
‘ because it has such a powerful determining factor over what you do, who you date, where you live, where you work. You have to keep your employer happy and they have to continue to sponsor you. \u003c/span>\u003c/p>\n\u003cp>\u003cspan style=\"font-weight: 400\">\u003cb>Azul Dahlstrom-Eckman: \u003c/b>You might need to take a job that takes you traveling out of the country, but with the Trump administration, you know, maybe it’s hard for you to get back into the country based on your country of origin. So I think people are constantly taking risks and living under uncertainty, you know, from one presidential administration to the next. They’re not sure how these rules surrounding H-1B status are going to change. \u003c/span>\u003c/p>\n\u003cp>\u003cb>Morgan Sung: \u003c/b>\u003cspan style=\"font-weight: 400\">In his reporting, Azul talked to an immigration lawyer based in Silicon Valley, Sophie Alcorn. \u003c/span>\u003c/p>\n\u003cp>\u003cb>Azul Dahlstrom-Eckman: \u003c/b>\u003cspan style=\"font-weight: 400\">The game metaphor made sense to her that the H-1B process is sort of like a game. She said that her two young sons invite her to play video games when she’s home and she says… \u003c/span>\u003c/p>\n\u003cp>\u003cb>Sophie Alcorn: \u003c/b>\u003cspan style=\"font-weight: 400\">You guys, I’m already playing one of the hardest video games. I don’t need to play another game because the immigration system is so complicated as it is. There’s randomness, there’s luck, there’s skill, there is strategy. There’s trying to go around and collect like, badges and items to upskill to be able to get to the next level just like in a game. \u003c/span>\u003c/p>\n\u003cp>\u003cb>Azul Dahlstrom-Eckman: \u003c/b>\u003cspan style=\"font-weight: 400\">In games, players are the most affected by the rules, but they also have the least control over them. Right? Players are beholden to the rules but the people that make the rules are not playing the same game. 
I mean, you could say that we’re now playing on difficulty level hard with the Trump administration. \u003c/span>\u003c/p>\n\u003cp>\u003cb>Morgan Sung: \u003c/b>\u003cspan style=\"font-weight: 400\">Okay, let’s talk about the game itself, H1B life. Donduk, the guy who just got his American citizenship, actually thought the prototype he played was too realistic. \u003c/span>\u003c/p>\n\u003cp>\u003cb>Azul Dahlstrom-Eckman: \u003c/b>\u003cspan style=\"font-weight: 400\">He said that the gameplay was a little triggering for him, it was too real. \u003c/span>\u003c/p>\n\u003cp>\u003cb>Morgan Sung: \u003c/b>\u003cspan style=\"font-weight: 400\">We’re going to get into that after this break. But first, we wanted to remind you that Close All Tabs depends on listeners like you to keep us going. You can support us by becoming a member at donate.kqed.org/podcasts. Okay, more about the game after the break. Stick with us. \u003c/span>\u003c/p>\n\u003cp>\u003cspan style=\"font-weight: 400\">\u003cb>Morgan Sung: \u003c/b>Welcome back. We’re getting into this game, H1B.Life. Who is it for? How does it work? And can it really help make sense of the immigration process? Let’s open a new tab: How to play the H1-B Visa game. \u003c/span>\u003c/p>\n\u003cp>\u003cspan style=\"font-weight: 400\">\u003cb>Morgan Sung: \u003c/b>The immigration system, at times, can seem like a black box to applicants, lawyers, and maybe most of all, to natural born American citizens who’ve never needed to think about this. The seemingly arbitrary rules that can change at the whims of an unseen entity, the gravity of every single decision, the pressure to succeed as the perfect model immigrant, that is the experience that developers are trying to capture in H1B.Life. \u003c/span>\u003c/p>\n\u003cp>\u003ci>\u003cspan style=\"font-weight: 400\">[Audio from H1B.Life Trailer] America the big and beautiful country, but you need a visa. 
Be talented, big brain, build chips, then you might get an H-1B visa.
Like, that’s kind of the balancing act. Then what’s promised in subsequent versions of the game is that if those core attributes run out, it triggers a sort of like slot machine feature where different gods decide players fates, and that’s sort of supposed to describe this random nature of the H-1B visa process. \u003c/span>\u003c/p>\n\u003cp>\u003ci>\u003cspan style=\"font-weight: 400\">[Audio from H1B.Life Trailer] \u003c/span>\u003c/i>\u003ci>\u003cspan style=\"font-weight: 400\">\u003cbr>\n\u003c/span>\u003c/i>\u003ci>\u003cspan style=\"font-weight: 400\">And don’t forget the immigration gods: code god, free god, fried chicken god, even orange god.\u003c/span>\u003c/i>\u003cspan style=\"font-weight: 400\"> \u003c/span>\u003c/p>\n\u003cp>\u003cb>Morgan Sung: \u003c/b>\u003cspan style=\"font-weight: 400\">Can you tell us more about these different gods in the game? I know there’s one called the orange god. \u003c/span>\u003c/p>\n\u003cp>\u003cb>Azul Dahlstrom-Eckman: \u003c/b>\u003cspan style=\"font-weight: 400\">So the orange god is the one that caught my eye. The orange god bears a very strong resemblance to Donald Trump. And the orange god claims to control everything and has already changed the policy 500 times before you finish reading the sentence. That’s what the description of the orange is. \u003c/span>\u003c/p>\n\u003cp>\u003cb>Allison Yang: \u003c/b>\u003cspan style=\"font-weight: 400\">He’s the newest God in this universe. He’s very powerful. He can destroy your life any minute he wants, and he usually do. So you have to be very careful of him. \u003c/span>\u003c/p>\n\u003cp>\u003cb>Morgan Sung: \u003c/b>\u003cspan style=\"font-weight: 400\">That’s Alison Yang, the founder of the game studio, Reality Reload. She told Azul about the other gods in the game. So there’s the code god, who looks like a cyborg and is obsessed with tech and optimization. 
The free god resembles the Statue of Liberty and is supposed to represent the American dream of a free society. And the fried chicken god? A nod to the annual Chick-fil-A tradition. And that god…\u003c/span>\u003c/p>\n\u003cp>\u003cb>Azul Dahlstrom-Eckman: \u003c/b>\u003cspan style=\"font-weight: 400\">…According to the game description, keeps you surviving through the power of fast food grease. \u003c/span>\u003c/p>\n\u003cp>\u003cb>Morgan Sung: \u003c/b>\u003cspan style=\"font-weight: 400\">[Laughter] Reality Reload is a game studio made up of immigrants, developers, designers, and journalists. The founder, Allison, has a background in journalism. \u003c/span>\u003c/p>\n\u003cp>\u003cb>Allison Yang: \u003c/b>\u003cspan style=\"font-weight: 400\">I love journalism, but over the years, as an editor and a reporter, I realized less and less people are reading long form, but there’s so much stories and information we want to pass on. I had the luck to step into the game industry for 7-8 years now, and I realized it’s the opposite. Like, people spend a massive amount of time in a the game. They complained one of my games to be too short, play is two hours. At the same time, they would complain that one of my articles is too long. So I thought, what if I turn it around, like a trojan horse. Like we wrap the news or information into a game and people doesn’t have to know that. They’re just playing something fun and they’re exposed to information anyway. \u003c/span>\u003c/p>\n\u003cp>\u003cb>Morgan Sung: \u003c/b>\u003cspan style=\"font-weight: 400\">So, as Allison told Azul, the point of H1B.Life is to educate people about the complexities of the immigration system. \u003c/span>\u003c/p>\n\u003cp>\u003cb>Azul Dahlstrom-Eckman: \u003c/b>\u003cspan style=\"font-weight: 400\">I think they started with the H-1B visa because it’s like this caricature of the visa system. It’s highly sought after. 
It’s very competitive, but they realized that it’s not just H-1B visas, it’s the whole United States Immigration System. \u003c/span>\u003c/p>\n\u003cp>\u003cb>Morgan Sung: \u003c/b>\u003cspan style=\"font-weight: 400\">The Reality Reload team initially designed the game based on their own experiences as Chinese immigrants in Silicon Valley. They conducted dozens of interviews with other Chinese immigrants for storylines in the game. But the team quickly realized that this experience is more universal than they first believed. \u003c/span>\u003c/p>\n\u003cp>\u003cb>Allison Yang: \u003c/b>\u003cspan style=\"font-weight: 400\">And it’s kind of funny because there is internal tensions between people of different origin who are competing for the same visa. But when we were talking to them, we realized it’s the same rat race, and you’re competing with each other not because the other party is evil or better, it’s because you have to. And then everyone’s, or every context, country of origin have their own dilemma. Like when we talk to Latino people from Bolivia…here it’s already very hard to find a job, but people at home find it very difficult to believe they couldn’t find money in the States. They have to mitigate through that. \u003c/span>\u003cspan style=\"font-weight: 400\">\u003cbr>\n\u003c/span>\u003cspan style=\"font-weight: 400\">I talked to my physician who is Indian, and she said their problem is even if they get a visa, there are too many Indian visa holders, they have to wait, I’m sure this number is not right, but she said 100 years to get a green card. So it’s like different versions of a game. \u003c/span>\u003c/p>\n\u003cp>\u003cb>Morgan Sung: \u003c/b>\u003cspan style=\"font-weight: 400\">H1B.Life revolves around choices and rules, which the player may or may not know about until they break one. \u003c/span>\u003c/p>\n\u003cp>\u003cb>Allison Yang: \u003c/b>\u003cspan style=\"font-weight: 400\">And the rules are changing every day. 
The player usually the one who has the least power or say, but they are the one we have to play through. So that tension is something we want to focus on. \u003c/span>\u003c/p>\n\u003cp>\u003cb>Morgan Sung: \u003c/b>\u003cspan style=\"font-weight: 400\">Allison did admit that early versions of the game were maybe too realistic. She told Azul that when they ran play tests, some people, like Donduk, found it a bit traumatic because they’ve dealt with this in real life. Donduk thought the game was triggering and not playful enough for a typical video game. Here’s Azul again. \u003c/span>\u003c/p>\n\u003cp>\u003cb>Azul Dahlstrom-Eckman: \u003c/b>\u003cspan style=\"font-weight: 400\">But he did think that it could have an application in like corporate diversity trainings. You could imagine like being at Google and a lot of your coworkers are H1B sponsors, being like, wow, I didn’t know that you had to go through that to get here, you know? And then that’s so different from how a United States citizen would get here. So that’s like, one potential application. \u003c/span>\u003c/p>\n\u003cp>\u003cb>Morgan Sung: \u003c/b>\u003cspan style=\"font-weight: 400\">H1B.Life is still a prototype. The Reality Reload team is still interviewing other immigrants to weave their experiences into the story. And they plan to add more fantasy and play to the game before it launches. Azul mentioned one mini game in the works, which involves juggling. Your hands are full with a social life, maintaining grades, and looking for a job that’ll sponsor you, all while checking emails from your immigration lawyer. This is core to the game, managing the tension between competing priorities. It prompts players to consider what they want more: to pursue their dreams, or to fit the mold of an ideal immigrant in order to stay in the country. 
\u003c/span>\u003c/p>\n\u003cp>\u003cb>Allison Yang: \u003c/b>\u003cspan style=\"font-weight: 400\">At the beginning we thought we were going to do a simple visa simulation game and now we realize it’s more about how people figure out what kind of life they want, where they want it to be. \u003c/span>\u003c/p>\n\u003cp>\u003cb>Morgan Sung: \u003c/b>\u003cspan style=\"font-weight: 400\">Which is also a sentiment that almost all employment-based visa holders have to consider. Except, unlike in the game, there’s no decision tree guiding their path. They have to make these choices for themselves. What does life look like when it’s not dictated by a precarious visa status? Let’s open another new tab: the post-visa midlife crisis. \u003c/span>\u003c/p>\n\u003cp>\u003cb>Azul Dahlstrom-Eckman: \u003c/b>\u003cspan style=\"font-weight: 400\">For Donduk Dovdon, he came here as a student and he was pursuing a master’s degree. And now he needs to decide where he’s going to work, not based on what he wants to do, but based on who will sponsor him for an H-1B visa. After you get an H1B Visa, then you’re on a six-year timeline where you have to hit certain benchmarks in order to get a green card. After 10 years of uncertain visa status…once he got his U.S. citizenship, he basically had like a midlife crisis. \u003c/span>\u003c/p>\n\u003cp>\u003cb>Donduk Dovdon: \u003c/b>\u003cspan style=\"font-weight: 400\">Because before, when I was on H1B, the only thing I had, my goal was to survive and be in this country. So I do whatever it takes to get a job that sponsors me for H-1B. And I do whatever it takes make my boss happy. But when I eventually got a green card, I finally had the privilege to think like an American, like, oh, what do I actually want to do with my life? I think now I’m still figuring out like what do I actually want to do? Now I’m like a 21 years old American, just graduated from college. 
I have all the opportunities finally opened up for me and I remember when I became citizen and I decided to quit PhD that was the hardest time in my life because like I’m like now finally I can move to anywhere in the States. I can be a bartender in Miami, but do I really want to be? \u003c/span>\u003cspan style=\"font-weight: 400\">\u003cbr>\n\u003c/span>\u003c/p>\n\u003cp>\u003cspan style=\"font-weight: 400\">\u003cb>Donduk Dovdon: \u003c/b>I don’t know. Like I spent two months wondering where should I go next. And I know some other also H-1B workers, they were like of the best coders as a company or program manager or whatever. And then when they got a green card, some guys, I know one guy, he quit and he moved to Midwest and he opened a bakery because that’s what he actually is passionate about. And I think it’s just, it’s like, finally as immigrants, like when we got our green card of citizenship, we finally have the privilege to ponder what Americans did probably around 18 years old, or early 20s, like what do you actually want to achieve? \u003c/span>\u003c/p>\n\u003cp>\u003cb>Morgan Sung: \u003c/b>\u003cspan style=\"font-weight: 400\">What do I really want to do with my life? It’s a conundrum that American citizens can ponder at any age, but most consider it when they’re teenagers or fresh out of college. Maybe a couple years into your career, you realize that it’s not for you and you can pivot. But if your legal status in this country hinges on being able to do one highly specialized job, you don’t get to pivot. \u003c/span>\u003c/p>\n\u003cp>\u003cb>Azul Dahlstrom-Eckman: \u003c/b>\u003cspan style=\"font-weight: 400\">Your whole reason for being in the country is holding these special degrees, this special job and at the end of the day, that’s not all anyone is. Nobody is just an H-1B visa holder. Like, they’re complex people with multitudes of desires and I think feeling the weight of that lift can be unsettling for people. 
\u003c/span>\u003c/p>\n\u003cp>\u003cb>Morgan Sung: \u003c/b>\u003cspan style=\"font-weight: 400\">Donduk, for one, questioned whether this pressure is worth it for everyone. He told Azul that he was glad to stay in the United States, which, despite everything, is a safer and more free place for him as an openly gay, ethnically Mongolian person. But, if he knew that he could live in China without fear of persecution… \u003c/span>\u003c/p>\n\u003cp>\u003cb>Donduk Dovdon: \u003c/b>\u003cspan style=\"font-weight: 400\">I think the U.S. is getting harder and harder to stay here for immigrants. Like, you have to evaluate, what do you value more. If you can live a comfortable life in your back home country and you value your family connections, do you really want to spend 10, 15 years here just working like a dog to get a green card here? And also we heard many other stories like some immigrants, eventually they moved to Singapore or Canada and they found happiness there. Or even some move to Africa. It’s not like U.S. is the only place you can be happy. \u003c/span>\u003c/p>\n\u003cp>\u003cb>Morgan Sung: \u003c/b>\u003cspan style=\"font-weight: 400\">Fewer international students are interested in studying in the U.S. Last year, international enrollment in American universities dropped 17%. Both Texas and Florida have banned H-1B hiring at public universities. Many scientists have raised concerns that the U.S. will lose its competitive edge in research between DOGE enforced funding cuts and H- 1B hiring freezes. But, as for working in the U.S., outside of academia? \u003c/span>\u003c/p>\n\u003cp>\u003cb>Azul Dahlstrom-Eckman: \u003c/b>\u003cspan style=\"font-weight: 400\">U.S. Citizen and Immigration Services said that they did hit their cap this year. So obviously there is still a demand for H-1B visas, but the Trump administration has made it a lot harder to get an H-1B visa. 
A lot of the Reality Reload team are Chinese immigrants and coming here, they’ve had the same experiences as the people they interviewed for these stories. I think it is very personal and part of why they wanted to give a voice to this experience because it is so pervasive in, you know, especially in like, the Bay area or other places where there’s a high need for specialized immigrant labor. This is really a huge thing and I think it’s not something that a lot of American citizens are aware of. \u003c/span>\u003c/p>\n\u003cp>\u003cb>Morgan Sung: \u003c/b>\u003cspan style=\"font-weight: 400\">What do you think the game says about the intersection of technology and very bureaucratic systems like the immigration process? \u003c/span>\u003c/p>\n\u003cp>\u003cb>Azul Dahlstrom-Eckman: \u003c/b>\u003cspan style=\"font-weight: 400\">Immigrants feel the whiplash of American government policies from like, Democrat to Republican, maybe more than most groups here in the country, and how it can upend their lives. And so I think this is a way for immigrants to tell their experiences and for them to feel seen and maybe to inject a little bit of critique into real life. Sometimes, like the experience can feel so arbitrary or so gamified. So maybe a game is the best way to understand and work towards making these processes better, or at least like, explain them. \u003c/span>\u003c/p>\n\u003cp>\u003cb>Morgan Sung: \u003c/b>\u003cspan style=\"font-weight: 400\">I have the privilege of being a natural-born U.S. Citizen. Both of my parents immigrated here when they were young, and I’ve never had to navigate the complexities of the immigration system myself. Many of my close family members have dealt with that, but I admit that even as a journalist, I struggle to differentiate between types of visas and what you can do with them: H-1B, OPT, EB-3, O-1, L-1B, K-1? It’s a dialect that’s unintelligible to a lot of natural born citizens. 
There’s a whole other world of paperwork and red tape that most Americans never have to think about. But that doesn’t mean it’s unimportant. So how do you get through the doom scrolling and get American citizens to understand the real life impact of these shifting immigration policies?\u003c/span>\u003cspan style=\"font-weight: 400\">\u003cbr>\n\u003c/span>\u003c/p>\n\u003cp>\u003cspan style=\"font-weight: 400\">\u003cb>Morgan Sung: \u003c/b>Like Allison pointed out, people who aren’t inclined to spend 20 minutes reading about visa changes, may be more convinced to spend 20 minutes in a game, trying to avoid the wrath of the orange god. Through surreal slot machines, fickle deities, and some skill juggling, games like H1B.Life can open players up to an unseen reality that exists right in front of them, one that might affect their friends, their coworkers, and their neighbors every day. \u003c/span>\u003c/p>\n\u003cp>\u003cspan style=\"font-weight: 400\">\u003cb>Morgan Sung: \u003c/b>Special thanks to Azul Dahlstrom-Eckman for sharing this story along with the interview recordings you heard today. You can find a link to Azul’s story and more about H-1B.Life and the immigrant experience in our show notes. \u003c/span>\u003cspan style=\"font-weight: 400\">Okay, let’s close all these tabs. \u003c/span>\u003c/p>\n\u003cp>\u003cspan style=\"font-weight: 400\">\u003cb>Morgan Sung: \u003c/b>Close All Tabs is a production of KQED Studios and is reported and hosted by me, Morgan Sung. This episode was produced by Maya Cueva and edited by Chris egusa, who also composed our theme song and credits music. The Close All Tabs team also includes editor Chris Hambrick and audio engineer, Brendan Willard. Additional music by APM. Audience engagement support from Maha Sanad, Jen Chien is our director of podcasts and Ethan Toven-Lindsey is our editor in chief. 
Some members of the KQED podcast team are represented by the Screen Actors Guild, American Federation of Television and Radio Artists, San Francisco Northern California Local. This episode’s keyboard sounds were submitted by Alex Tran, and recorded on his white Epomaker Hi75 keyboard with Fogruaden red samurai keycaps and gateron milky yellow pro v2 switches. Thanks for listening. \u003c/span>\u003c/p>\n\u003cp> \u003c/p>\n\u003cp>\u003c/p>\n\u003c/div>"
}
],
"link": "/news/12080824/the-h-1b-visa-process-but-make-it-a-video-game",
"authors": [
"11944",
"11943",
"11869",
"11832"
],
"programs": [
"news_35082"
],
"categories": [
"news_33520"
],
"tags": [
"news_22973",
"news_20526",
"news_20611",
"news_3137",
"news_34646",
"news_1631",
"news_5702",
"news_35248"
],
"featImg": "news_12080827",
"label": "source_news_12080824"
}
},
"programsReducer": {
"all-things-considered": {
"id": "all-things-considered",
"title": "All Things Considered",
"info": "Every weekday, \u003cem>All Things Considered\u003c/em> hosts Robert Siegel, Audie Cornish, Ari Shapiro, and Kelly McEvers present the program's trademark mix of news, interviews, commentaries, reviews, and offbeat features. Michel Martin hosts on the weekends.",
"airtime": "MON-FRI 1pm-2pm, 4:30pm-6:30pm\u003cbr />SAT-SUN 5pm-6pm",
"imageSrc": "https://cdn.kqed.org/wp-content/uploads/2024/04/All-Things-Considered-Podcast-Tile-360x360-1.jpg",
"officialWebsiteLink": "https://www.npr.org/programs/all-things-considered/",
"meta": {
"site": "news",
"source": "npr"
},
"link": "/radio/program/all-things-considered"
},
"american-suburb-podcast": {
"id": "american-suburb-podcast",
"title": "American Suburb: The Podcast",
"tagline": "The flip side of gentrification, told through one town",
"info": "Gentrification is changing cities across America, forcing people from neighborhoods they have long called home. Call them the displaced. Now those priced out of the Bay Area are looking for a better life in an unlikely place. American Suburb follows this migration to one California town along the Delta, 45 miles from San Francisco. But is this once sleepy suburb ready for them?",
"imageSrc": "https://cdn.kqed.org/wp-content/uploads/2024/04/American-Suburb-Podcast-Tile-703x703-1.jpg",
"officialWebsiteLink": "/news/series/american-suburb-podcast",
"meta": {
"site": "news",
"source": "kqed",
"order": 19
},
"link": "/news/series/american-suburb-podcast/",
"subscribe": {
"npr": "https://rpb3r.app.goo.gl/RBrW",
"apple": "https://itunes.apple.com/WebObjects/MZStore.woa/wa/viewPodcast?mt=2&id=1287748328",
"tuneIn": "https://tunein.com/radio/American-Suburb-p1086805/",
"rss": "https://ww2.kqed.org/news/series/american-suburb-podcast/feed/podcast",
"google": "https://podcasts.google.com/feed/aHR0cHM6Ly9mZWVkcy5tZWdhcGhvbmUuZm0vS1FJTkMzMDExODgxNjA5"
}
},
"baycurious": {
"id": "baycurious",
"title": "Bay Curious",
"tagline": "Exploring the Bay Area, one question at a time",
"info": "KQED’s new podcast, Bay Curious, gets to the bottom of the mysteries — both profound and peculiar — that give the Bay Area its unique identity. And we’ll do it with your help! You ask the questions. You decide what Bay Curious investigates. And you join us on the journey to find the answers.",
"imageSrc": "https://cdn.kqed.org/wp-content/uploads/2024/04/Bay-Curious-Podcast-Tile-703x703-1.jpg",
"imageAlt": "KQED Bay Curious",
"officialWebsiteLink": "/news/series/baycurious",
"meta": {
"site": "news",
"source": "kqed",
"order": 3
},
"link": "/podcasts/baycurious",
"subscribe": {
"apple": "https://podcasts.apple.com/us/podcast/bay-curious/id1172473406",
"npr": "https://www.npr.org/podcasts/500557090/bay-curious",
"rss": "https://ww2.kqed.org/news/category/bay-curious-podcast/feed/podcast",
"amazon": "https://music.amazon.com/podcasts/9a90d476-aa04-455d-9a4c-0871ed6216d4/bay-curious",
"stitcher": "https://www.stitcher.com/podcast/kqed/bay-curious",
"spotify": "https://open.spotify.com/show/6O76IdmhixfijmhTZLIJ8k"
}
},
"bbc-world-service": {
"id": "bbc-world-service",
"title": "BBC World Service",
"info": "The day's top stories from BBC News compiled twice daily in the week, once at weekends.",
"airtime": "MON-FRI 9pm-10pm, TUE-FRI 1am-2am",
"imageSrc": "https://cdn.kqed.org/wp-content/uploads/2024/04/BBC-World-Service-Podcast-Tile-360x360-1.jpg",
"officialWebsiteLink": "https://www.bbc.co.uk/sounds/play/live:bbc_world_service",
"meta": {
"site": "news",
"source": "BBC World Service"
},
"link": "/radio/program/bbc-world-service",
"subscribe": {
"apple": "https://itunes.apple.com/us/podcast/global-news-podcast/id135067274?mt=2",
"tuneIn": "https://tunein.com/radio/BBC-World-Service-p455581/",
"rss": "https://podcasts.files.bbci.co.uk/p02nq0gn.rss"
}
},
"californiareport": {
"id": "californiareport",
"title": "The California Report",
"tagline": "California, day by day",
"info": "KQED’s statewide radio news program providing daily coverage of issues, trends and public policy decisions.",
"imageSrc": "https://cdn.kqed.org/wp-content/uploads/2024/04/The-California-Report-Podcast-Tile-703x703-1.jpg",
"imageAlt": "KQED The California Report",
"officialWebsiteLink": "/californiareport",
"meta": {
"site": "news",
"source": "kqed",
"order": 8
},
"link": "/californiareport",
"subscribe": {
"apple": "https://podcasts.apple.com/us/podcast/kqeds-the-california-report/id79681292",
"google": "https://podcasts.google.com/feed/aHR0cHM6Ly9mZWVkcy5tZWdhcGhvbmUuZm0vS1FJTkM1MDAyODE4NTgz",
"npr": "https://www.npr.org/podcasts/432285393/the-california-report",
"stitcher": "https://www.stitcher.com/podcast/kqedfm-kqeds-the-california-report-podcast-8838",
"rss": "https://ww2.kqed.org/news/tag/tcram/feed/podcast"
}
},
"californiareportmagazine": {
"id": "californiareportmagazine",
"title": "The California Report Magazine",
"tagline": "Your state, your stories",
"info": "Every week, The California Report Magazine takes you on a road trip for the ears: to visit the places and meet the people who make California unique. The in-depth storytelling podcast from the California Report.",
"airtime": "FRI 4:30pm-5pm, 6:30pm-7pm, 11pm-11:30pm",
"imageSrc": "https://cdn.kqed.org/wp-content/uploads/2024/04/The-California-Report-Magazine-Podcast-Tile-703x703-1.jpg",
"imageAlt": "KQED The California Report Magazine",
"officialWebsiteLink": "/californiareportmagazine",
"meta": {
"site": "news",
"source": "kqed",
"order": 10
},
"link": "/californiareportmagazine",
"subscribe": {
"apple": "https://podcasts.apple.com/us/podcast/the-california-report-magazine/id1314750545",
"google": "https://podcasts.google.com/feed/aHR0cHM6Ly9mZWVkcy5tZWdhcGhvbmUuZm0vS1FJTkM3NjkwNjk1OTAz",
"npr": "https://www.npr.org/podcasts/564733126/the-california-report-magazine",
"stitcher": "https://www.stitcher.com/podcast/kqed/the-california-report-magazine",
"rss": "https://ww2.kqed.org/news/tag/tcrmag/feed/podcast"
}
},
"city-arts": {
"id": "city-arts",
"title": "City Arts & Lectures",
"info": "A one-hour radio program to hear celebrated writers, artists and thinkers address contemporary ideas and values, often discussing the creative process. Please note: tapes or transcripts are not available",
"imageSrc": "https://ww2.kqed.org/radio/wp-content/uploads/sites/50/2018/05/cityartsandlecture-300x300.jpg",
"officialWebsiteLink": "https://www.cityarts.net/",
"airtime": "SUN 1pm-2pm, TUE 10pm, WED 1am",
"meta": {
"site": "news",
"source": "City Arts & Lectures"
},
"link": "https://www.cityarts.net",
"subscribe": {
"tuneIn": "https://tunein.com/radio/City-Arts-and-Lectures-p692/",
"rss": "https://www.cityarts.net/feed/"
}
},
"closealltabs": {
"id": "closealltabs",
"title": "Close All Tabs",
"tagline": "Your irreverent guide to the trends redefining our world",
"info": "Close All Tabs breaks down how digital culture shapes our world through thoughtful insights and irreverent humor.",
"imageSrc": "https://cdn.kqed.org/wp-content/uploads/2025/02/CAT_2_Tile-scaled.jpg",
"imageAlt": "KQED Close All Tabs",
"officialWebsiteLink": "/podcasts/closealltabs",
"meta": {
"site": "news",
"source": "kqed",
"order": 1
},
"link": "/podcasts/closealltabs",
"subscribe": {
"apple": "https://podcasts.apple.com/us/podcast/close-all-tabs/id214663465",
"rss": "https://feeds.megaphone.fm/KQINC6993880386",
"amazon": "https://music.amazon.com/podcasts/92d9d4ac-67a3-4eed-b10a-fb45d45b1ef2/close-all-tabs",
"spotify": "https://open.spotify.com/show/6LAJFHnGK1pYXYzv6SIol6?si=deb0cae19813417c"
}
},
"code-switch-life-kit": {
"id": "code-switch-life-kit",
"title": "Code Switch / Life Kit",
"info": "\u003cem>Code Switch\u003c/em>, which listeners will hear in the first part of the hour, has fearless and much-needed conversations about race. Hosted by journalists of color, the show tackles the subject of race head-on, exploring how it impacts every part of society — from politics and pop culture to history, sports and more.\u003cbr />\u003cbr />\u003cem>Life Kit\u003c/em>, which will be in the second part of the hour, guides you through spaces and feelings no one prepares you for — from finances to mental health, from workplace microaggressions to imposter syndrome, from relationships to parenting. The show features experts with real world experience and shares their knowledge. Because everyone needs a little help being human.\u003cbr />\u003cbr />\u003ca href=\"https://www.npr.org/podcasts/510312/codeswitch\">\u003cem>Code Switch\u003c/em> official site and podcast\u003c/a>\u003cbr />\u003ca href=\"https://www.npr.org/lifekit\">\u003cem>Life Kit\u003c/em> official site and podcast\u003c/a>\u003cbr />",
"airtime": "SUN 9pm-10pm",
"imageSrc": "https://cdn.kqed.org/wp-content/uploads/2024/04/Code-Switch-Life-Kit-Podcast-Tile-360x360-1.jpg",
"meta": {
"site": "radio",
"source": "npr"
},
"link": "/radio/program/code-switch-life-kit",
"subscribe": {
"apple": "https://podcasts.apple.com/podcast/1112190608?mt=2&at=11l79Y&ct=nprdirectory",
"google": "https://podcasts.google.com/feed/aHR0cHM6Ly93d3cubnByLm9yZy9yc3MvcG9kY2FzdC5waHA_aWQ9NTEwMzEy",
"spotify": "https://open.spotify.com/show/3bExJ9JQpkwNhoHvaIIuyV",
"rss": "https://feeds.npr.org/510312/podcast.xml"
}
},
"commonwealth-club": {
"id": "commonwealth-club",
"title": "Commonwealth Club of California Podcast",
"info": "The Commonwealth Club of California is the nation's oldest and largest public affairs forum. As a non-partisan forum, The Club brings to the public airwaves diverse viewpoints on important topics. The Club's weekly radio broadcast - the oldest in the U.S., dating back to 1924 - is carried across the nation on public radio stations and is now podcasting. Our website archive features audio of our recent programs, as well as selected speeches from our long and distinguished history. This podcast feed is usually updated twice a week and is always un-edited.",
"airtime": "THU 10pm, FRI 1am",
"imageSrc": "https://cdn.kqed.org/wp-content/uploads/2024/04/Commonwealth-Club-Podcast-Tile-360x360-1.jpg",
"officialWebsiteLink": "https://www.commonwealthclub.org/podcasts",
"meta": {
"site": "news",
"source": "Commonwealth Club of California"
},
"link": "/radio/program/commonwealth-club",
"subscribe": {
"apple": "https://itunes.apple.com/us/podcast/commonwealth-club-of-california-podcast/id976334034?mt=2",
"google": "https://podcasts.google.com/feed/aHR0cDovL3d3dy5jb21tb253ZWFsdGhjbHViLm9yZy9hdWRpby9wb2RjYXN0L3dlZWtseS54bWw",
"tuneIn": "https://tunein.com/radio/Commonwealth-Club-of-California-p1060/"
}
},
"forum": {
"id": "forum",
"title": "Forum",
"tagline": "The conversation starts here",
"info": "KQED’s live call-in program discussing local, state, national and international issues, as well as in-depth interviews.",
"airtime": "MON-FRI 9am-11am, 10pm-11pm",
"imageSrc": "https://cdn.kqed.org/wp-content/uploads/2024/04/Forum-Podcast-Tile-703x703-1.jpg",
"imageAlt": "KQED Forum with Mina Kim and Alexis Madrigal",
"officialWebsiteLink": "/forum",
"meta": {
"site": "news",
"source": "kqed",
"order": 9
},
"link": "/forum",
"subscribe": {
"apple": "https://podcasts.apple.com/us/podcast/kqeds-forum/id73329719",
"google": "https://podcasts.google.com/feed/aHR0cHM6Ly9mZWVkcy5tZWdhcGhvbmUuZm0vS1FJTkM5NTU3MzgxNjMz",
"npr": "https://www.npr.org/podcasts/432307980/forum",
"stitcher": "https://www.stitcher.com/podcast/kqedfm-kqeds-forum-podcast",
"rss": "https://feeds.megaphone.fm/KQINC9557381633"
}
},
"freakonomics-radio": {
"id": "freakonomics-radio",
"title": "Freakonomics Radio",
"info": "Freakonomics Radio is a one-hour award-winning podcast and public-radio project hosted by Stephen Dubner, with co-author Steve Levitt as a regular guest. It is produced in partnership with WNYC.",
"imageSrc": "https://ww2.kqed.org/news/wp-content/uploads/sites/10/2018/05/freakonomicsRadio.png",
"officialWebsiteLink": "http://freakonomics.com/",
"airtime": "SUN 1am-2am, SAT 3pm-4pm",
"meta": {
"site": "radio",
"source": "WNYC"
},
"link": "/radio/program/freakonomics-radio",
"subscribe": {
"npr": "https://rpb3r.app.goo.gl/4s8b",
"apple": "https://itunes.apple.com/us/podcast/freakonomics-radio/id354668519",
"tuneIn": "https://tunein.com/podcasts/WNYC-Podcasts/Freakonomics-Radio-p272293/",
"rss": "https://feeds.feedburner.com/freakonomicsradio"
}
},
"fresh-air": {
"id": "fresh-air",
"title": "Fresh Air",
"info": "Hosted by Terry Gross, \u003cem>Fresh Air from WHYY\u003c/em> is the Peabody Award-winning weekday magazine of contemporary arts and issues. One of public radio's most popular programs, Fresh Air features intimate conversations with today's biggest luminaries.",
"airtime": "MON-FRI 7pm-8pm",
"imageSrc": "https://cdn.kqed.org/wp-content/uploads/2024/04/Fresh-Air-Podcast-Tile-360x360-1.jpg",
"officialWebsiteLink": "https://www.npr.org/programs/fresh-air/",
"meta": {
"site": "radio",
"source": "npr"
},
"link": "/radio/program/fresh-air",
"subscribe": {
"npr": "https://rpb3r.app.goo.gl/4s8b",
"apple": "https://itunes.apple.com/WebObjects/MZStore.woa/wa/viewPodcast?s=143441&mt=2&id=214089682&at=11l79Y&ct=nprdirectory",
"tuneIn": "https://tunein.com/radio/Fresh-Air-p17/",
"rss": "https://feeds.npr.org/381444908/podcast.xml"
}
},
"here-and-now": {
"id": "here-and-now",
"title": "Here & Now",
"info": "A live production of NPR and WBUR Boston, in collaboration with stations across the country, Here & Now reflects the fluid world of news as it's happening in the middle of the day, with timely, in-depth news, interviews and conversation. Hosted by Robin Young, Jeremy Hobson and Tonya Mosley.",
"airtime": "MON-THU 11am-12pm",
"imageSrc": "https://cdn.kqed.org/wp-content/uploads/2024/04/Here-And-Now-Podcast-Tile-360x360-1.jpg",
"officialWebsiteLink": "http://www.wbur.org/hereandnow",
"meta": {
"site": "news",
"source": "npr"
},
"link": "/radio/program/here-and-now",
"subscribe": {
"apple": "https://itunes.apple.com/WebObjects/MZStore.woa/wa/viewPodcast?mt=2&id=426698661",
"tuneIn": "https://tunein.com/radio/Here--Now-p211/",
"rss": "https://feeds.npr.org/510051/podcast.xml"
}
},
"hidden-brain": {
"id": "hidden-brain",
"title": "Hidden Brain",
"info": "Shankar Vedantam uses science and storytelling to reveal the unconscious patterns that drive human behavior, shape our choices and direct our relationships.",
"imageSrc": "https://ww2.kqed.org/radio/wp-content/uploads/sites/50/2018/05/hiddenbrain.jpg",
"officialWebsiteLink": "https://www.npr.org/series/423302056/hidden-brain",
"airtime": "SUN 7pm-8pm",
"meta": {
"site": "news",
"source": "NPR"
},
"link": "/radio/program/hidden-brain",
"subscribe": {
"apple": "https://itunes.apple.com/us/podcast/hidden-brain/id1028908750?mt=2",
"tuneIn": "https://tunein.com/podcasts/Science-Podcasts/Hidden-Brain-p787503/",
"rss": "https://feeds.npr.org/510308/podcast.xml"
}
},
"how-i-built-this": {
"id": "how-i-built-this",
"title": "How I Built This with Guy Raz",
"info": "Guy Raz dives into the stories behind some of the world's best known companies. How I Built This weaves a narrative journey about innovators, entrepreneurs and idealists—and the movements they built.",
"imageSrc": "https://ww2.kqed.org/news/wp-content/uploads/sites/10/2018/05/howIBuiltThis.png",
"officialWebsiteLink": "https://www.npr.org/podcasts/510313/how-i-built-this",
"airtime": "SUN 7:30pm-8pm",
"meta": {
"site": "news",
"source": "npr"
},
"link": "/radio/program/how-i-built-this",
"subscribe": {
"npr": "https://rpb3r.app.goo.gl/3zxy",
"apple": "https://itunes.apple.com/us/podcast/how-i-built-this-with-guy-raz/id1150510297?mt=2",
"tuneIn": "https://tunein.com/podcasts/Arts--Culture-Podcasts/How-I-Built-This-p910896/",
"rss": "https://feeds.npr.org/510313/podcast.xml"
}
},
"hyphenacion": {
"id": "hyphenacion",
"title": "Hyphenación",
"tagline": "Where conversation and cultura meet",
"info": "What kind of no sabo word is Hyphenación? For us, it’s about living within a hyphenation. Like being a third-gen Mexican-American from the Texas border now living that Bay Area Chicano life. Like Xorje! Each week we bring together a couple of hyphenated Latinos to talk all about personal life choices: family, careers, relationships, belonging … everything is on the table. ",
"imageSrc": "https://cdn.kqed.org/wp-content/uploads/2025/03/Hyphenacion_FinalAssets_PodcastTile.png",
"imageAlt": "KQED Hyphenación",
"officialWebsiteLink": "/podcasts/hyphenacion",
"meta": {
"site": "news",
"source": "kqed",
"order": 15
},
"link": "/podcasts/hyphenacion",
"subscribe": {
"apple": "https://podcasts.apple.com/us/podcast/hyphenaci%C3%B3n/id1191591838",
"spotify": "https://open.spotify.com/show/2p3Fifq96nw9BPcmFdIq0o?si=39209f7b25774f38",
"youtube": "https://www.youtube.com/c/kqedarts",
"amazon": "https://music.amazon.com/podcasts/6c3dd23c-93fb-4aab-97ba-1725fa6315f1/hyphenaci%C3%B3n",
"rss": "https://feeds.megaphone.fm/KQINC2275451163"
}
},
"jerrybrown": {
"id": "jerrybrown",
"title": "The Political Mind of Jerry Brown",
"tagline": "Lessons from a lifetime in politics",
"info": "The Political Mind of Jerry Brown brings listeners the wisdom of the former Governor, Mayor, and presidential candidate. Scott Shafer interviewed Brown for more than 40 hours, covering the former governor's life and half-century in the political game and Brown has some lessons he'd like to share. ",
"imageSrc": "https://cdn.kqed.org/wp-content/uploads/2024/04/The-Political-Mind-of-Jerry-Brown-Podcast-Tile-703x703-1.jpg",
"imageAlt": "KQED The Political Mind of Jerry Brown",
"officialWebsiteLink": "/podcasts/jerrybrown",
"meta": {
"site": "news",
"source": "kqed",
"order": 18
},
"link": "/podcasts/jerrybrown",
"subscribe": {
"npr": "https://www.npr.org/podcasts/790253322/the-political-mind-of-jerry-brown",
"apple": "https://itunes.apple.com/us/podcast/id1492194549",
"rss": "https://ww2.kqed.org/news/series/jerrybrown/feed/podcast/",
"tuneIn": "http://tun.in/pjGcK",
"stitcher": "https://www.stitcher.com/podcast/kqed/the-political-mind-of-jerry-brown",
"spotify": "https://open.spotify.com/show/54C1dmuyFyKMFttY6X2j6r?si=K8SgRCoISNK6ZbjpXrX5-w",
"google": "https://podcasts.google.com/feed/aHR0cHM6Ly93dzIua3FlZC5vcmcvbmV3cy9zZXJpZXMvamVycnlicm93bi9mZWVkL3BvZGNhc3Qv"
}
},
"latino-usa": {
"id": "latino-usa",
"title": "Latino USA",
"airtime": "MON 1am-2am, SUN 6pm-7pm",
"info": "Latino USA, the radio journal of news and culture, is the only national, English-language radio program produced from a Latino perspective.",
"imageSrc": "https://ww2.kqed.org/radio/wp-content/uploads/sites/50/2018/04/latinoUsa.jpg",
"officialWebsiteLink": "http://latinousa.org/",
"meta": {
"site": "news",
"source": "npr"
},
"link": "/radio/program/latino-usa",
"subscribe": {
"npr": "https://rpb3r.app.goo.gl/xtTd",
"apple": "https://itunes.apple.com/WebObjects/MZStore.woa/wa/viewPodcast?s=143441&mt=2&id=79681317&at=11l79Y&ct=nprdirectory",
"tuneIn": "https://tunein.com/radio/Latino-USA-p621/",
"rss": "https://feeds.npr.org/510016/podcast.xml"
}
},
"marketplace": {
"id": "marketplace",
"title": "Marketplace",
"info": "Our flagship program, helmed by Kai Ryssdal, examines what the day in money delivered, through stories, conversations, newsworthy numbers and more. Updated Monday through Friday at about 3:30 p.m. PT.",
"airtime": "MON-FRI 4pm-4:30pm, MON-WED 6:30pm-7pm",
"imageSrc": "https://cdn.kqed.org/wp-content/uploads/2024/04/Marketplace-Podcast-Tile-360x360-1.jpg",
"officialWebsiteLink": "https://www.marketplace.org/",
"meta": {
"site": "news",
"source": "American Public Media"
},
"link": "/radio/program/marketplace",
"subscribe": {
"apple": "https://itunes.apple.com/WebObjects/MZStore.woa/wa/viewPodcast?s=143441&mt=2&id=201853034&at=11l79Y&ct=nprdirectory",
"tuneIn": "https://tunein.com/radio/APM-Marketplace-p88/",
"rss": "https://feeds.publicradio.org/public_feeds/marketplace-pm/rss/rss"
}
},
"masters-of-scale": {
"id": "masters-of-scale",
"title": "Masters of Scale",
"info": "Masters of Scale is an original podcast in which LinkedIn co-founder and Greylock Partner Reid Hoffman sets out to describe and prove theories that explain how great entrepreneurs take their companies from zero to a gazillion in ingenious fashion.",
"airtime": "Every other Wednesday June 12 through October 16 at 8pm (repeats Thursdays at 2am)",
"imageSrc": "https://cdn.kqed.org/wp-content/uploads/2024/04/Masters-of-Scale-Podcast-Tile-360x360-1.jpg",
"officialWebsiteLink": "https://mastersofscale.com/",
"meta": {
"site": "radio",
"source": "WaitWhat"
},
"link": "/radio/program/masters-of-scale",
"subscribe": {
"apple": "http://mastersofscale.app.link/",
"rss": "https://rss.art19.com/masters-of-scale"
}
},
"mindshift": {
"id": "mindshift",
"title": "MindShift",
"tagline": "A podcast about the future of learning and how we raise our kids",
"info": "The MindShift podcast explores the innovations in education that are shaping how kids learn. Hosts Ki Sung and Katrina Schwartz introduce listeners to educators, researchers, parents and students who are developing effective ways to improve how kids learn. We cover topics like how fed-up administrators are developing surprising tactics to deal with classroom disruptions; how listening to podcasts are helping kids develop reading skills; the consequences of overparenting; and why interdisciplinary learning can engage students on all ends of the traditional achievement spectrum. This podcast is part of the MindShift education site, a division of KQED News. KQED is an NPR/PBS member station based in San Francisco. You can also visit the MindShift website for episodes and supplemental blog posts or tweet us \u003ca href=\"https://twitter.com/MindShiftKQED\">@MindShiftKQED\u003c/a> or visit us at \u003ca href=\"/mindshift\">MindShift.KQED.org\u003c/a>",
"imageSrc": "https://cdn.kqed.org/wp-content/uploads/2024/04/Mindshift-Podcast-Tile-703x703-1.jpg",
"imageAlt": "KQED MindShift: How We Will Learn",
"officialWebsiteLink": "/mindshift/",
"meta": {
"site": "news",
"source": "kqed",
"order": 12
},
"link": "/podcasts/mindshift",
"subscribe": {
"apple": "https://podcasts.apple.com/us/podcast/mindshift-podcast/id1078765985",
"google": "https://podcasts.google.com/feed/aHR0cHM6Ly9mZWVkcy5tZWdhcGhvbmUuZm0vS1FJTkM1NzY0NjAwNDI5",
"npr": "https://www.npr.org/podcasts/464615685/mind-shift-podcast",
"stitcher": "https://www.stitcher.com/podcast/kqed/stories-teachers-share",
"spotify": "https://open.spotify.com/show/0MxSpNYZKNprFLCl7eEtyx"
}
},
"morning-edition": {
"id": "morning-edition",
"title": "Morning Edition",
"info": "\u003cem>Morning Edition\u003c/em> takes listeners around the country and the world with multi-faceted stories and commentaries every weekday. Hosts Steve Inskeep, David Greene and Rachel Martin bring you the latest breaking news and features to prepare you for the day.",
"airtime": "MON-FRI 3am-9am",
"imageSrc": "https://cdn.kqed.org/wp-content/uploads/2024/04/Morning-Edition-Podcast-Tile-360x360-1.jpg",
"officialWebsiteLink": "https://www.npr.org/programs/morning-edition/",
"meta": {
"site": "news",
"source": "npr"
},
"link": "/radio/program/morning-edition"
},
"onourwatch": {
"id": "onourwatch",
"title": "On Our Watch",
"tagline": "Deeply-reported investigative journalism",
"info": "For decades, the process for how police police themselves has been inconsistent – if not opaque. In some states, like California, these proceedings were completely hidden. After a new police transparency law unsealed scores of internal affairs files, our reporters set out to examine these cases and the shadow world of police discipline. On Our Watch brings listeners into the rooms where officers are questioned and witnesses are interrogated to find out who this system is really protecting. Is it the officers, or the public they've sworn to serve?",
"imageSrc": "https://cdn.kqed.org/wp-content/uploads/2024/04/On-Our-Watch-Podcast-Tile-703x703-1.jpg",
"imageAlt": "On Our Watch from NPR and KQED",
"officialWebsiteLink": "/podcasts/onourwatch",
"meta": {
"site": "news",
"source": "kqed",
"order": 11
},
"link": "/podcasts/onourwatch",
"subscribe": {
"apple": "https://podcasts.apple.com/podcast/id1567098962",
"google": "https://podcasts.google.com/feed/aHR0cHM6Ly9mZWVkcy5ucHIub3JnLzUxMDM2MC9wb2RjYXN0LnhtbD9zYz1nb29nbGVwb2RjYXN0cw",
"npr": "https://rpb3r.app.goo.gl/onourwatch",
"spotify": "https://open.spotify.com/show/0OLWoyizopu6tY1XiuX70x",
"tuneIn": "https://tunein.com/radio/On-Our-Watch-p1436229/",
"stitcher": "https://www.stitcher.com/show/on-our-watch",
"rss": "https://feeds.npr.org/510360/podcast.xml"
}
},
"on-the-media": {
"id": "on-the-media",
"title": "On The Media",
"info": "Our weekly podcast explores how the media 'sausage' is made, casts an incisive eye on fluctuations in the marketplace of ideas, and examines threats to the freedom of information and expression in America and abroad. For one hour a week, the show tries to lift the veil from the process of \"making media,\" especially news media, because it's through that lens that we see the world and the world sees us",
"airtime": "SUN 2pm-3pm, MON 12am-1am",
"imageSrc": "https://ww2.kqed.org/radio/wp-content/uploads/sites/50/2018/04/onTheMedia.png",
"officialWebsiteLink": "https://www.wnycstudios.org/shows/otm",
"meta": {
"site": "news",
"source": "wnyc"
},
"link": "/radio/program/on-the-media",
"subscribe": {
"apple": "https://itunes.apple.com/us/podcast/on-the-media/id73330715?mt=2",
"tuneIn": "https://tunein.com/radio/On-the-Media-p69/",
"rss": "http://feeds.wnyc.org/onthemedia"
}
},
"pbs-newshour": {
"id": "pbs-newshour",
"title": "PBS NewsHour",
"info": "Analysis, background reports and updates from the PBS NewsHour putting today's news in context.",
"airtime": "MON-FRI 3pm-4pm",
"imageSrc": "https://cdn.kqed.org/wp-content/uploads/2024/04/PBS-News-Hour-Podcast-Tile-360x360-1.jpg",
"officialWebsiteLink": "https://www.pbs.org/newshour/",
"meta": {
"site": "news",
"source": "pbs"
},
"link": "/radio/program/pbs-newshour",
"subscribe": {
"apple": "https://itunes.apple.com/us/podcast/pbs-newshour-full-show/id394432287?mt=2",
"tuneIn": "https://tunein.com/radio/PBS-NewsHour---Full-Show-p425698/",
"rss": "https://www.pbs.org/newshour/feeds/rss/podcasts/show"
}
},
"perspectives": {
"id": "perspectives",
"title": "Perspectives",
"tagline": "KQED's series of daily listener commentaries since 1991",
"info": "KQED's series of daily listener commentaries since 1991.",
"imageSrc": "https://cdn.kqed.org/wp-content/uploads/2025/01/Perspectives_Tile_Final.jpg",
"imageAlt": "KQED Perspectives",
"officialWebsiteLink": "/perspectives/",
"meta": {
"site": "radio",
"source": "kqed",
"order": 14
},
"link": "/perspectives",
"subscribe": {
"apple": "https://podcasts.apple.com/us/podcast/id73801135",
"npr": "https://www.npr.org/podcasts/432309616/perspectives",
"rss": "https://ww2.kqed.org/perspectives/category/perspectives/feed/",
"google": "https://podcasts.google.com/feed/aHR0cHM6Ly93dzIua3FlZC5vcmcvcGVyc3BlY3RpdmVzL2NhdGVnb3J5L3BlcnNwZWN0aXZlcy9mZWVkLw"
}
},
"planet-money": {
"id": "planet-money",
"title": "Planet Money",
"info": "The economy explained. Imagine you could call up a friend and say, Meet me at the bar and tell me what's going on with the economy. Now imagine that's actually a fun evening.",
"airtime": "SUN 3pm-4pm",
"imageSrc": "https://ww2.kqed.org/radio/wp-content/uploads/sites/50/2018/04/planetmoney.jpg",
"officialWebsiteLink": "https://www.npr.org/sections/money/",
"meta": {
"site": "news",
"source": "npr"
},
"link": "/radio/program/planet-money",
"subscribe": {
"npr": "https://rpb3r.app.goo.gl/M4f5",
"apple": "https://itunes.apple.com/us/podcast/planet-money/id290783428?mt=2",
"tuneIn": "https://tunein.com/podcasts/Business--Economics-Podcasts/Planet-Money-p164680/",
"rss": "https://feeds.npr.org/510289/podcast.xml"
}
},
"politicalbreakdown": {
"id": "politicalbreakdown",
"title": "Political Breakdown",
"tagline": "Politics from a personal perspective",
"info": "Political Breakdown is a new series that explores the political intersection of California and the nation. Each week hosts Scott Shafer and Marisa Lagos are joined with a new special guest to unpack politics -- with personality — and offer an insider’s glimpse at how politics happens.",
"airtime": "THU 6:30pm-7pm",
"imageSrc": "https://cdn.kqed.org/wp-content/uploads/2024/04/Political-Breakdown-2024-Podcast-Tile-703x703-1.jpg",
"imageAlt": "KQED Political Breakdown",
"officialWebsiteLink": "/podcasts/politicalbreakdown",
"meta": {
"site": "radio",
"source": "kqed",
"order": 5
},
"link": "/podcasts/politicalbreakdown",
"subscribe": {
"apple": "https://podcasts.apple.com/us/podcast/political-breakdown/id1327641087",
"google": "https://podcasts.google.com/feed/aHR0cHM6Ly9mZWVkcy5tZWdhcGhvbmUuZm0vS1FJTkM5Nzk2MzI2MTEx",
"npr": "https://www.npr.org/podcasts/572155894/political-breakdown",
"stitcher": "https://www.stitcher.com/podcast/kqed/political-breakdown",
"spotify": "https://open.spotify.com/show/07RVyIjIdk2WDuVehvBMoN",
"rss": "https://ww2.kqed.org/news/tag/political-breakdown/feed/podcast"
}
},
"possible": {
"id": "possible",
"title": "Possible",
"info": "Possible is hosted by entrepreneur Reid Hoffman and writer Aria Finger. Together in Possible, Hoffman and Finger lead enlightening discussions about building a brighter collective future. The show features interviews with visionary guests like Trevor Noah, Sam Altman and Janette Sadik-Khan. Possible paints an optimistic portrait of the world we can create through science, policy, business, art and our shared humanity. It asks: What if everything goes right for once? How can we get there? Each episode also includes a short fiction story generated by advanced AI GPT-4, serving as a thought-provoking springboard to speculate how humanity could leverage technology for good.",
"airtime": "SUN 2pm",
"imageSrc": "https://cdn.kqed.org/wp-content/uploads/2024/04/Possible-Podcast-Tile-360x360-1.jpg",
"officialWebsiteLink": "https://www.possible.fm/",
"meta": {
"site": "news",
"source": "Possible"
},
"link": "/radio/program/possible",
"subscribe": {
"apple": "https://podcasts.apple.com/us/podcast/possible/id1677184070",
"spotify": "https://open.spotify.com/show/730YpdUSNlMyPQwNnyjp4k"
}
},
"pri-the-world": {
"id": "pri-the-world",
"title": "PRI's The World: Latest Edition",
"info": "Each weekday, host Marco Werman and his team of producers bring you the world's most interesting stories in an hour of radio that reminds us just how small our planet really is.",
"airtime": "MON-FRI 2pm-3pm",
"imageSrc": "https://cdn.kqed.org/wp-content/uploads/2024/04/The-World-Podcast-Tile-360x360-1.jpg",
"officialWebsiteLink": "https://www.pri.org/programs/the-world",
"meta": {
"site": "news",
"source": "PRI"
},
"link": "/radio/program/pri-the-world",
"subscribe": {
"apple": "https://itunes.apple.com/us/podcast/pris-the-world-latest-edition/id278196007?mt=2",
"tuneIn": "https://tunein.com/podcasts/News--Politics-Podcasts/PRIs-The-World-p24/",
"rss": "http://feeds.feedburner.com/pri/theworld"
}
},
"radiolab": {
"id": "radiolab",
"title": "Radiolab",
"info": "A two-time Peabody Award-winner, Radiolab is an investigation told through sounds and stories, and centered around one big idea. In the Radiolab world, information sounds like music and science and culture collide. Hosted by Jad Abumrad and Robert Krulwich, the show is designed for listeners who demand skepticism, but appreciate wonder. WNYC Studios is the producer of other leading podcasts including Freakonomics Radio, Death, Sex & Money, On the Media and many more.",
"airtime": "SUN 12am-1am, SAT 2pm-3pm",
"imageSrc": "https://ww2.kqed.org/radio/wp-content/uploads/sites/50/2018/04/radiolab1400.png",
"officialWebsiteLink": "https://www.wnycstudios.org/shows/radiolab/",
"meta": {
"site": "science",
"source": "WNYC"
},
"link": "/radio/program/radiolab",
"subscribe": {
"apple": "https://itunes.apple.com/us/podcast/radiolab/id152249110?mt=2",
"tuneIn": "https://tunein.com/radio/RadioLab-p68032/",
"rss": "https://feeds.wnyc.org/radiolab"
}
},
"reveal": {
"id": "reveal",
"title": "Reveal",
"info": "Created by The Center for Investigative Reporting and PRX, Reveal is public radio's first one-hour weekly radio show and podcast dedicated to investigative reporting. Credible, fact based and without a partisan agenda, Reveal combines the power and artistry of driveway moment storytelling with data-rich reporting on critically important issues. The result is stories that inform and inspire, arming our listeners with information to right injustices, hold the powerful accountable and improve lives. Reveal is hosted by Al Letson and showcases the award-winning work of CIR and newsrooms large and small across the nation. In a radio and podcast market crowded with choices, Reveal focuses on important and often surprising stories that illuminate the world for our listeners.",
"airtime": "SAT 4pm-5pm",
"imageSrc": "https://ww2.kqed.org/radio/wp-content/uploads/sites/50/2018/04/reveal300px.png",
"officialWebsiteLink": "https://www.revealnews.org/episodes/",
"meta": {
"site": "news",
"source": "npr"
},
"link": "/radio/program/reveal",
"subscribe": {
"apple": "https://itunes.apple.com/us/podcast/reveal/id886009669",
"tuneIn": "https://tunein.com/radio/Reveal-p679597/",
"rss": "http://feeds.revealradio.org/revealpodcast"
}
},
"rightnowish": {
"id": "rightnowish",
"title": "Rightnowish",
"tagline": "Art is where you find it",
"info": "Rightnowish digs into life in the Bay Area right now… ish. Journalist Pendarvis Harshaw takes us to galleries painted on the sides of liquor stores in West Oakland. We'll dance in warehouses in the Bayview, make smoothies with kids in South Berkeley, and listen to classical music in a 1984 Cutlass Supreme in Richmond. Every week, Pen talks to movers and shakers about how the Bay Area shapes what they create, and how they shape the place we call home.",
"imageSrc": "https://cdn.kqed.org/wp-content/uploads/2024/04/Rightnowish-Podcast-Tile-500x500-1.jpg",
"imageAlt": "KQED Rightnowish with Pendarvis Harshaw",
"officialWebsiteLink": "/podcasts/rightnowish",
"meta": {
"site": "arts",
"source": "kqed",
"order": 16
},
"link": "/podcasts/rightnowish",
"subscribe": {
"npr": "https://www.npr.org/podcasts/721590300/rightnowish",
"rss": "https://ww2.kqed.org/arts/programs/rightnowish/feed/podcast",
"apple": "https://podcasts.apple.com/us/podcast/rightnowish/id1482187648",
"stitcher": "https://www.stitcher.com/podcast/kqed/rightnowish",
"google": "https://podcasts.google.com/feed/aHR0cHM6Ly9mZWVkcy5tZWdhcGhvbmUuZm0vS1FJTkMxMjU5MTY3NDc4",
"spotify": "https://open.spotify.com/show/7kEJuafTzTVan7B78ttz1I"
}
},
"science-friday": {
"id": "science-friday",
"title": "Science Friday",
"info": "Science Friday is a weekly science talk show, broadcast live over public radio stations nationwide. Each week, the show focuses on science topics that are in the news and tries to bring an educated, balanced discussion to bear on the scientific issues at hand. Panels of expert guests join host Ira Flatow, a veteran science journalist, to discuss science and to take questions from listeners during the call-in portion of the program.",
"airtime": "FRI 11am-1pm",
"imageSrc": "https://cdn.kqed.org/wp-content/uploads/2024/04/Science-Friday-Podcast-Tile-360x360-1.jpg",
"officialWebsiteLink": "https://www.wnycstudios.org/shows/science-friday",
"meta": {
"site": "news",
"source": "npr"
},
"link": "/radio/program/science-friday",
"subscribe": {
"apple": "https://itunes.apple.com/WebObjects/MZStore.woa/wa/viewPodcast?s=143441&mt=2&id=73329284&at=11l79Y&ct=nprdirectory",
"tuneIn": "https://tunein.com/radio/Science-Friday-p394/",
"rss": "http://feeds.wnyc.org/science-friday"
}
},
"snap-judgment": {
"id": "snap-judgment",
"title": "Snap Judgment",
"tagline": "Real stories with killer beats",
"info": "The Snap Judgment radio show and podcast mixes real stories with killer beats to produce cinematic, dramatic radio. Snap's musical brand of storytelling dares listeners to see the world through the eyes of another. This is storytelling... with a BEAT!! Snap first aired on public radio stations nationwide in July 2010. Today, Snap Judgment airs on over 450 public radio stations and is brought to the airwaves by KQED & PRX.",
"airtime": "SAT 1pm-2pm, 9pm-10pm",
"imageSrc": "https://cdn.kqed.org/wp-content/uploads/2024/05/Snap-Judgment-Podcast-Tile-703x703-1.jpg",
"imageAlt": "KQED Snap Judgment",
"officialWebsiteLink": "https://snapjudgment.org",
"meta": {
"site": "arts",
"source": "kqed",
"order": 4
},
"link": "https://snapjudgment.org",
"subscribe": {
"apple": "https://podcasts.apple.com/us/podcast/snap-judgment/id283657561",
"npr": "https://www.npr.org/podcasts/449018144/snap-judgment",
"stitcher": "https://www.pandora.com/podcast/snap-judgment/PC:241?source=stitcher-sunset",
"spotify": "https://open.spotify.com/show/3Cct7ZWmxHNAtLgBTqjC5v",
"rss": "https://snap.feed.snapjudgment.org/"
}
},
"soldout": {
"id": "soldout",
"title": "SOLD OUT: Rethinking Housing in America",
"tagline": "A new future for housing",
"info": "Sold Out: Rethinking Housing in America",
"imageSrc": "https://cdn.kqed.org/wp-content/uploads/2024/04/Sold-Out-Podcast-Tile-703x703-1.jpg",
"imageAlt": "KQED Sold Out: Rethinking Housing in America",
"officialWebsiteLink": "/podcasts/soldout",
"meta": {
"site": "news",
"source": "kqed",
"order": 13
},
"link": "/podcasts/soldout",
"subscribe": {
"npr": "https://www.npr.org/podcasts/911586047/s-o-l-d-o-u-t-a-new-future-for-housing",
"apple": "https://podcasts.apple.com/us/podcast/introducing-sold-out-rethinking-housing-in-america/id1531354937",
"rss": "https://feeds.megaphone.fm/soldout",
"spotify": "https://open.spotify.com/show/38dTBSk2ISFoPiyYNoKn1X",
"stitcher": "https://www.stitcher.com/podcast/kqed/sold-out-rethinking-housing-in-america",
"tuneIn": "https://tunein.com/radio/SOLD-OUT-Rethinking-Housing-in-America-p1365871/",
"google": "https://podcasts.google.com/feed/aHR0cHM6Ly9mZWVkcy5tZWdhcGhvbmUuZm0vc29sZG91dA"
}
},
"spooked": {
"id": "spooked",
"title": "Spooked",
"tagline": "True-life supernatural stories",
"info": "",
"imageSrc": "https://cdn.kqed.org/wp-content/uploads/2024/10/Spooked-Podcast-Tile-703x703-1.jpg",
"imageAlt": "KQED Spooked",
"officialWebsiteLink": "https://spookedpodcast.org/",
"meta": {
"site": "news",
"source": "kqed",
"order": 7
},
"link": "https://spookedpodcast.org/",
"subscribe": {
"apple": "https://podcasts.apple.com/us/podcast/spooked/id1279361017",
"npr": "https://www.npr.org/podcasts/549547848/snap-judgment-presents-spooked",
"spotify": "https://open.spotify.com/show/76571Rfl3m7PLJQZKQIGCT",
"rss": "https://feeds.simplecast.com/TBotaapn"
}
},
"tech-nation": {
"id": "tech-nation",
"title": "Tech Nation Radio Podcast",
"info": "Tech Nation is a weekly public radio program, hosted by Dr. Moira Gunn. Founded in 1993, it has grown from a simple interview show to a multi-faceted production, featuring conversations with noted technology and science leaders, and a weekly science and technology-related commentary.",
"airtime": "FRI 10pm",
"imageSrc": "https://cdn.kqed.org/wp-content/uploads/2024/04/Tech-Nation-Radio-Podcast-Tile-360x360-1.jpg",
"officialWebsiteLink": "http://technation.podomatic.com/",
"meta": {
"site": "science",
"source": "Tech Nation Media"
},
"link": "/radio/program/tech-nation",
"subscribe": {
"rss": "https://technation.podomatic.com/rss2.xml"
}
},
"ted-radio-hour": {
"id": "ted-radio-hour",
"title": "TED Radio Hour",
"info": "The TED Radio Hour is a journey through fascinating ideas, astonishing inventions, fresh approaches to old problems, and new ways to think and create.",
"airtime": "SUN 3pm-4pm, SAT 10pm-11pm",
"imageSrc": "https://ww2.kqed.org/radio/wp-content/uploads/sites/50/2018/04/tedRadioHour.jpg",
"officialWebsiteLink": "https://www.npr.org/programs/ted-radio-hour/?showDate=2018-06-22",
"meta": {
"site": "news",
"source": "npr"
},
"link": "/radio/program/ted-radio-hour",
"subscribe": {
"npr": "https://rpb3r.app.goo.gl/8vsS",
"apple": "https://itunes.apple.com/WebObjects/MZStore.woa/wa/viewPodcast?s=143441&mt=2&id=523121474&at=11l79Y&ct=nprdirectory",
"tuneIn": "https://tunein.com/radio/TED-Radio-Hour-p418021/",
"rss": "https://feeds.npr.org/510298/podcast.xml"
}
},
"thebay": {
"id": "thebay",
"title": "The Bay",
"tagline": "Local news to keep you rooted",
"info": "Host Devin Katayama walks you through the biggest story of the day with reporters and newsmakers.",
"imageSrc": "https://cdn.kqed.org/wp-content/uploads/2024/04/The-Bay-Podcast-Tile-703x703-1.jpg",
"imageAlt": "KQED The Bay",
"officialWebsiteLink": "/podcasts/thebay",
"meta": {
"site": "radio",
"source": "kqed",
"order": 2
},
"link": "/podcasts/thebay",
"subscribe": {
"apple": "https://podcasts.apple.com/us/podcast/the-bay/id1350043452",
"google": "https://podcasts.google.com/feed/aHR0cHM6Ly9mZWVkcy5tZWdhcGhvbmUuZm0vS1FJTkM4MjU5Nzg2MzI3",
"npr": "https://www.npr.org/podcasts/586725995/the-bay",
"stitcher": "https://www.stitcher.com/podcast/kqed/the-bay",
"spotify": "https://open.spotify.com/show/4BIKBKIujizLHlIlBNaAqQ",
"rss": "https://feeds.megaphone.fm/KQINC8259786327"
}
},
"thelatest": {
"id": "thelatest",
"title": "The Latest",
"tagline": "Trusted local news in real time",
"info": "",
"imageSrc": "https://cdn.kqed.org/wp-content/uploads/2025/05/The-Latest-2025-Podcast-Tile-703x703-1.jpg",
"imageAlt": "KQED The Latest",
"officialWebsiteLink": "/thelatest",
"meta": {
"site": "news",
"source": "kqed",
"order": 6
},
"link": "/thelatest",
"subscribe": {
"apple": "https://podcasts.apple.com/us/podcast/the-latest-from-kqed/id1197721799",
"npr": "https://www.npr.org/podcasts/1257949365/the-latest-from-k-q-e-d",
"spotify": "https://open.spotify.com/show/5KIIXMgM9GTi5AepwOYvIZ?si=bd3053fec7244dba",
"rss": "https://feeds.megaphone.fm/KQINC9137121918"
}
},
"theleap": {
"id": "theleap",
"title": "The Leap",
"tagline": "What if you closed your eyes, and jumped?",
"info": "Stories about people making dramatic, risky changes, told by award-winning public radio reporter Judy Campbell.",
"imageSrc": "https://cdn.kqed.org/wp-content/uploads/2024/04/The-Leap-Podcast-Tile-703x703-1.jpg",
"imageAlt": "KQED The Leap",
"officialWebsiteLink": "/podcasts/theleap",
"meta": {
"site": "news",
"source": "kqed",
"order": 17
},
"link": "/podcasts/theleap",
"subscribe": {
"apple": "https://podcasts.apple.com/us/podcast/the-leap/id1046668171",
"google": "https://podcasts.google.com/feed/aHR0cHM6Ly9mZWVkcy5tZWdhcGhvbmUuZm0vS1FJTkM0NTcwODQ2MjY2",
"npr": "https://www.npr.org/podcasts/447248267/the-leap",
"stitcher": "https://www.stitcher.com/podcast/kqed/the-leap",
"spotify": "https://open.spotify.com/show/3sSlVHHzU0ytLwuGs1SD1U",
"rss": "https://ww2.kqed.org/news/programs/the-leap/feed/podcast"
}
},
"the-moth-radio-hour": {
"id": "the-moth-radio-hour",
"title": "The Moth Radio Hour",
"info": "Since its launch in 1997, The Moth has presented thousands of true stories, told live and without notes, to standing-room-only crowds worldwide. Moth storytellers stand alone, under a spotlight, with only a microphone and a roomful of strangers. The storyteller and the audience embark on a high-wire act of shared experience which is both terrifying and exhilarating. Since 2008, The Moth podcast has featured many of our favorite stories told live on Moth stages around the country. For information on all of our programs and live events, visit themoth.org.",
"airtime": "SAT 8pm-9pm and SUN 11am-12pm",
"imageSrc": "https://ww2.kqed.org/radio/wp-content/uploads/sites/50/2018/04/theMoth.jpg",
"officialWebsiteLink": "https://themoth.org/",
"meta": {
"site": "arts",
"source": "prx"
},
"link": "/radio/program/the-moth-radio-hour",
"subscribe": {
"apple": "https://itunes.apple.com/us/podcast/the-moth-podcast/id275699983?mt=2",
"tuneIn": "https://tunein.com/radio/The-Moth-p273888/",
"rss": "http://feeds.themoth.org/themothpodcast"
}
},
"the-new-yorker-radio-hour": {
"id": "the-new-yorker-radio-hour",
"title": "The New Yorker Radio Hour",
"info": "The New Yorker Radio Hour is a weekly program presented by the magazine's editor, David Remnick, and produced by WNYC Studios and The New Yorker. Each episode features a diverse mix of interviews, profiles, storytelling, and an occasional burst of humor inspired by the magazine, and shaped by its writers, artists, and editors. This isn't a radio version of a magazine, but something all its own, reflecting the rich possibilities of audio storytelling and conversation. Theme music for the show was composed and performed by Merrill Garbus of tUnE-YArDs.",
"airtime": "SAT 10am-11am",
"imageSrc": "https://cdn.kqed.org/wp-content/uploads/2024/04/The-New-Yorker-Podcast-Tile-360x360-1.jpg",
"officialWebsiteLink": "https://www.wnycstudios.org/shows/tnyradiohour",
"meta": {
"site": "arts",
"source": "WNYC"
},
"link": "/radio/program/the-new-yorker-radio-hour",
"subscribe": {
"apple": "https://itunes.apple.com/us/podcast/id1050430296",
"tuneIn": "https://tunein.com/podcasts/WNYC-Podcasts/New-Yorker-Radio-Hour-p803804/",
"rss": "https://feeds.feedburner.com/newyorkerradiohour"
}
},
"the-sam-sanders-show": {
"id": "the-sam-sanders-show",
"title": "The Sam Sanders Show",
"info": "One of public radio's most dynamic voices, Sam Sanders helped launch The NPR Politics Podcast and hosted NPR's hit show It's Been A Minute. Now, the award-winning host returns with something brand new, The Sam Sanders Show. Every week, Sam Sanders and friends dig into the culture that shapes our lives: what's driving the biggest trends, how artists really think, and even the memes you can't stop scrolling past. Sam is beloved for his way of unpacking the world and bringing you up close to fresh currents and engaging conversations. The Sam Sanders Show is smart, funny and always a good time.",
"airtime": "FRI 12-1pm AND SAT 11am-12pm",
"imageSrc": "https://cdn.kqed.org/wp-content/uploads/2025/11/The-Sam-Sanders-Show-Podcast-Tile-400x400-1.jpg",
"officialWebsiteLink": "https://www.kcrw.com/shows/the-sam-sanders-show/latest",
"meta": {
"site": "arts",
"source": "KCRW"
},
"link": "https://www.kcrw.com/shows/the-sam-sanders-show/latest",
"subscribe": {
"rss": "https://feed.cdnstream1.com/zjb/feed/download/ac/28/59/ac28594c-e1d0-4231-8728-61865cdc80e8.xml"
}
},
"the-splendid-table": {
"id": "the-splendid-table",
"title": "The Splendid Table",
"info": "\u003cem>The Splendid Table\u003c/em> hosts our nation's conversations about cooking, sustainability and food culture.",
"imageSrc": "https://cdn.kqed.org/wp-content/uploads/2024/04/The-Splendid-Table-Podcast-Tile-360x360-1.jpg",
"officialWebsiteLink": "https://www.splendidtable.org/",
"airtime": "SUN 10-11 pm",
"meta": {
"site": "radio",
"source": "npr"
},
"link": "/radio/program/the-splendid-table"
},
"this-american-life": {
"id": "this-american-life",
"title": "This American Life",
"info": "This American Life is a weekly public radio show, heard by 2.2 million people on more than 500 stations. Another 2.5 million people download the weekly podcast. It is hosted by Ira Glass, produced in collaboration with Chicago Public Media, delivered to stations by PRX The Public Radio Exchange, and has won all of the major broadcasting awards.",
"airtime": "SAT 12pm-1pm, 7pm-8pm",
"imageSrc": "https://ww2.kqed.org/radio/wp-content/uploads/sites/50/2018/04/thisAmericanLife.png",
"officialWebsiteLink": "https://www.thisamericanlife.org/",
"meta": {
"site": "news",
"source": "wbez"
},
"link": "/radio/program/this-american-life",
"subscribe": {
"apple": "https://itunes.apple.com/WebObjects/MZStore.woa/wa/viewPodcast?s=143441&mt=2&id=201671138&at=11l79Y&ct=nprdirectory",
"rss": "https://www.thisamericanlife.org/podcast/rss.xml"
}
},
"tinydeskradio": {
"id": "tinydeskradio",
"title": "Tiny Desk Radio",
"info": "We're bringing the best of Tiny Desk to the airwaves, only on public radio.",
"airtime": "SUN 8pm and SAT 9pm",
"imageSrc": "https://cdn.kqed.org/wp-content/uploads/2025/04/300x300-For-Member-Station-Logo-Tiny-Desk-Radio-@2x.png",
"officialWebsiteLink": "https://www.npr.org/series/g-s1-52030/tiny-desk-radio",
"meta": {
"site": "news",
"source": "npr"
},
"link": "/radio/program/tinydeskradio",
"subscribe": {
"rss": "https://feeds.npr.org/g-s1-52030/rss.xml"
}
},
"wait-wait-dont-tell-me": {
"id": "wait-wait-dont-tell-me",
"title": "Wait Wait... Don't Tell Me!",
"info": "Peter Sagal and Bill Kurtis host the weekly NPR News quiz show alongside some of the best and brightest news and entertainment personalities.",
"airtime": "SUN 10am-11am, SAT 11am-12pm, SAT 6pm-7pm",
"imageSrc": "https://cdn.kqed.org/wp-content/uploads/2024/04/Wait-Wait-Podcast-Tile-300x300-1.jpg",
"officialWebsiteLink": "https://www.npr.org/programs/wait-wait-dont-tell-me/",
"meta": {
"site": "news",
"source": "npr"
},
"link": "/radio/program/wait-wait-dont-tell-me",
"subscribe": {
"npr": "https://rpb3r.app.goo.gl/Xogv",
"apple": "https://itunes.apple.com/WebObjects/MZStore.woa/wa/viewPodcast?s=143441&mt=2&id=121493804&at=11l79Y&ct=nprdirectory",
"tuneIn": "https://tunein.com/radio/Wait-Wait-Dont-Tell-Me-p46/",
"rss": "https://feeds.npr.org/344098539/podcast.xml"
}
},
"weekend-edition-saturday": {
"id": "weekend-edition-saturday",
"title": "Weekend Edition Saturday",
"info": "Weekend Edition Saturday wraps up the week's news and offers a mix of analysis and features on a wide range of topics, including arts, sports, entertainment, and human interest stories. The two-hour program is hosted by NPR's Peabody Award-winning Scott Simon.",
"airtime": "SAT 5am-10am",
"imageSrc": "https://cdn.kqed.org/wp-content/uploads/2024/04/Weekend-Edition-Podcast-Tile-360x360-1.jpg",
"officialWebsiteLink": "https://www.npr.org/programs/weekend-edition-saturday/",
"meta": {
"site": "news",
"source": "npr"
},
"link": "/radio/program/weekend-edition-saturday"
},
"weekend-edition-sunday": {
"id": "weekend-edition-sunday",
"title": "Weekend Edition Sunday",
"info": "Weekend Edition Sunday features interviews with newsmakers, artists, scientists, politicians, musicians, writers, theologians and historians. The program has covered news events from Nelson Mandela's 1990 release from a South African prison to the capture of Saddam Hussein.",
"airtime": "SUN 5am-10am",
"imageSrc": "https://cdn.kqed.org/wp-content/uploads/2024/04/Weekend-Edition-Podcast-Tile-360x360-1.jpg",
"officialWebsiteLink": "https://www.npr.org/programs/weekend-edition-sunday/",
"meta": {
"site": "news",
"source": "npr"
},
"link": "/radio/program/weekend-edition-sunday"
}
},
"racesReducer": {},
"racesGenElectionReducer": {},
"radioSchedulesReducer": {},
"listsReducer": {
"posts/news?tag=technology": {
"isFetching": false,
"latestQuery": {
"from": 0,
"postsToRender": 9
},
"tag": null,
"vitalsOnly": true,
"totalRequested": 9,
"isLoading": false,
"isLoadingMore": true,
"total": {
"value": 219,
"relation": "eq"
},
"items": [
"news_12082478",
"news_12081916",
"news_12081798",
"news_12081721",
"news_12081603",
"news_12081290",
"news_12081336",
"news_12081279",
"news_12080824"
]
}
},
"recallGuideReducer": {
"intros": {},
"policy": {},
"candidates": {}
},
"savedArticleReducer": {
"articles": [],
"status": {}
},
"pfsSessionReducer": {},
"subscriptionsReducer": {},
"termsReducer": {
"about": {
"name": "About",
"type": "terms",
"id": "about",
"slug": "about",
"link": "/about",
"taxonomy": "site"
},
"arts": {
"name": "Arts & Culture",
"grouping": [
"arts",
"pop",
"trulyca"
],
"description": "KQED Arts provides daily in-depth coverage of the Bay Area's music, art, film, performing arts, literature and arts news, as well as cultural commentary and criticism.",
"type": "terms",
"id": "arts",
"slug": "arts",
"link": "/arts",
"taxonomy": "site"
},
"artschool": {
"name": "Art School",
"parent": "arts",
"type": "terms",
"id": "artschool",
"slug": "artschool",
"link": "/artschool",
"taxonomy": "site"
},
"bayareabites": {
"name": "KQED food",
"grouping": [
"food",
"bayareabites",
"checkplease"
],
"parent": "food",
"type": "terms",
"id": "bayareabites",
"slug": "bayareabites",
"link": "/food",
"taxonomy": "site"
},
"bayareahiphop": {
"name": "Bay Area Hiphop",
"type": "terms",
"id": "bayareahiphop",
"slug": "bayareahiphop",
"link": "/bayareahiphop",
"taxonomy": "site"
},
"campaign21": {
"name": "Campaign 21",
"type": "terms",
"id": "campaign21",
"slug": "campaign21",
"link": "/campaign21",
"taxonomy": "site"
},
"checkplease": {
"name": "KQED food",
"grouping": [
"food",
"bayareabites",
"checkplease"
],
"parent": "food",
"type": "terms",
"id": "checkplease",
"slug": "checkplease",
"link": "/food",
"taxonomy": "site"
},
"education": {
"name": "Education",
"grouping": [
"education"
],
"type": "terms",
"id": "education",
"slug": "education",
"link": "/education",
"taxonomy": "site"
},
"elections": {
"name": "Elections",
"type": "terms",
"id": "elections",
"slug": "elections",
"link": "/elections",
"taxonomy": "site"
},
"events": {
"name": "Events",
"type": "terms",
"id": "events",
"slug": "events",
"link": "/events",
"taxonomy": "site"
},
"event": {
"name": "Event",
"alias": "events",
"type": "terms",
"id": "event",
"slug": "event",
"link": "/event",
"taxonomy": "site"
},
"filmschoolshorts": {
"name": "Film School Shorts",
"type": "terms",
"id": "filmschoolshorts",
"slug": "filmschoolshorts",
"link": "/filmschoolshorts",
"taxonomy": "site"
},
"food": {
"name": "KQED food",
"grouping": [
"food",
"bayareabites",
"checkplease"
],
"type": "terms",
"id": "food",
"slug": "food",
"link": "/food",
"taxonomy": "site"
},
"forum": {
"name": "Forum",
"relatedContentQuery": "posts/forum?",
"parent": "news",
"type": "terms",
"id": "forum",
"slug": "forum",
"link": "/forum",
"taxonomy": "site"
},
"futureofyou": {
"name": "Future of You",
"grouping": [
"science",
"futureofyou"
],
"parent": "science",
"type": "terms",
"id": "futureofyou",
"slug": "futureofyou",
"link": "/futureofyou",
"taxonomy": "site"
},
"jpepinheart": {
"name": "KQED food",
"relatedContentQuery": "posts/food,bayareabites,checkplease",
"parent": "food",
"type": "terms",
"id": "jpepinheart",
"slug": "jpepinheart",
"link": "/food",
"taxonomy": "site"
},
"liveblog": {
"name": "Live Blog",
"type": "terms",
"id": "liveblog",
"slug": "liveblog",
"link": "/liveblog",
"taxonomy": "site"
},
"livetv": {
"name": "Live TV",
"parent": "tv",
"type": "terms",
"id": "livetv",
"slug": "livetv",
"link": "/livetv",
"taxonomy": "site"
},
"lowdown": {
"name": "The Lowdown",
"relatedContentQuery": "posts/lowdown?",
"parent": "news",
"type": "terms",
"id": "lowdown",
"slug": "lowdown",
"link": "/lowdown",
"taxonomy": "site"
},
"mindshift": {
"name": "Mindshift",
"parent": "news",
"description": "MindShift explores the future of education by highlighting the innovative – and sometimes counterintuitive – ways educators and parents are helping all children succeed.",
"type": "terms",
"id": "mindshift",
"slug": "mindshift",
"link": "/mindshift",
"taxonomy": "site"
},
"news": {
"name": "News",
"grouping": [
"news",
"forum"
],
"type": "terms",
"id": "news",
"slug": "news",
"link": "/news",
"taxonomy": "site"
},
"perspectives": {
"name": "Perspectives",
"parent": "radio",
"type": "terms",
"id": "perspectives",
"slug": "perspectives",
"link": "/perspectives",
"taxonomy": "site"
},
"podcasts": {
"name": "Podcasts",
"type": "terms",
"id": "podcasts",
"slug": "podcasts",
"link": "/podcasts",
"taxonomy": "site"
},
"pop": {
"name": "Pop",
"parent": "arts",
"type": "terms",
"id": "pop",
"slug": "pop",
"link": "/pop",
"taxonomy": "site"
},
"pressroom": {
"name": "Pressroom",
"type": "terms",
"id": "pressroom",
"slug": "pressroom",
"link": "/pressroom",
"taxonomy": "site"
},
"quest": {
"name": "Quest",
"parent": "science",
"type": "terms",
"id": "quest",
"slug": "quest",
"link": "/quest",
"taxonomy": "site"
},
"radio": {
"name": "Radio",
"grouping": [
"forum",
"perspectives"
],
"description": "Listen to KQED Public Radio – home of Forum and The California Report – on 88.5 FM in San Francisco, 89.3 FM in Sacramento, 88.3 FM in Santa Rosa and 88.1 FM in Martinez.",
"type": "terms",
"id": "radio",
"slug": "radio",
"link": "/radio",
"taxonomy": "site"
},
"root": {
"name": "KQED",
"image": "https://ww2.kqed.org/app/uploads/2020/02/KQED-OG-Image@1x.png",
"imageWidth": 1200,
"imageHeight": 630,
"headData": {
"title": "KQED | News, Radio, Podcasts, TV | Public Media for Northern California",
"description": "KQED provides public radio, television, and independent reporting on issues that matter to the Bay Area. We’re the NPR and PBS member station for Northern California."
},
"type": "terms",
"id": "root",
"slug": "root",
"link": "/root",
"taxonomy": "site"
},
"science": {
"name": "Science",
"grouping": [
"science",
"futureofyou"
],
"description": "KQED Science brings you award-winning science and environment coverage from the Bay Area and beyond.",
"type": "terms",
"id": "science",
"slug": "science",
"link": "/science",
"taxonomy": "site"
},
"stateofhealth": {
"name": "State of Health",
"parent": "science",
"type": "terms",
"id": "stateofhealth",
"slug": "stateofhealth",
"link": "/stateofhealth",
"taxonomy": "site"
},
"support": {
"name": "Support",
"type": "terms",
"id": "support",
"slug": "support",
"link": "/support",
"taxonomy": "site"
},
"thedolist": {
"name": "The Do List",
"parent": "arts",
"type": "terms",
"id": "thedolist",
"slug": "thedolist",
"link": "/thedolist",
"taxonomy": "site"
},
"trulyca": {
"name": "Truly CA",
"grouping": [
"arts",
"pop",
"trulyca"
],
"parent": "arts",
"type": "terms",
"id": "trulyca",
"slug": "trulyca",
"link": "/trulyca",
"taxonomy": "site"
},
"tv": {
"name": "TV",
"type": "terms",
"id": "tv",
"slug": "tv",
"link": "/tv",
"taxonomy": "site"
},
"voterguide": {
"name": "Voter Guide",
"parent": "elections",
"alias": "elections",
"type": "terms",
"id": "voterguide",
"slug": "voterguide",
"link": "/voterguide",
"taxonomy": "site"
},
"guiaelectoral": {
"name": "Guia Electoral",
"parent": "elections",
"alias": "elections",
"type": "terms",
"id": "guiaelectoral",
"slug": "guiaelectoral",
"link": "/guiaelectoral",
"taxonomy": "site"
},
"news_1631": {
"type": "terms",
"id": "news_1631",
"meta": {
"index": "terms_1716263798",
"site": "news",
"id": "1631",
"found": true
},
"relationships": {},
"name": "Technology",
"slug": "technology",
"taxonomy": "tag",
"description": null,
"featImg": null,
"headData": {
"title": "Technology | KQED News",
"description": null,
"ogTitle": null,
"ogDescription": null,
"ogImgId": null,
"twTitle": null,
"twDescription": null,
"twImgId": null,
"imageData": {
"ogImageSize": {
"file": "https://cdn.kqed.org/wp-content/uploads/2020/02/KQED-OG-Image@1x.png",
"width": 1200,
"height": 630
},
"twImageSize": {
"file": "https://cdn.kqed.org/wp-content/uploads/2020/02/KQED-OG-Image@1x.png"
},
"twitterCard": "summary_large_image"
}
},
"ttid": 1643,
"isLoading": false,
"link": "/news/tag/technology"
},
"source_news_12082478": {
"type": "terms",
"id": "source_news_12082478",
"meta": {
"override": true
},
"name": "Close All Tabs",
"link": "https://www.kqed.org/podcasts/closealltabs",
"isLoading": false
},
"source_news_12081721": {
"type": "terms",
"id": "source_news_12081721",
"meta": {
"override": true
},
"name": "Close All Tabs",
"link": "https://www.kqed.org/podcasts/closealltabs",
"isLoading": false
},
"source_news_12080824": {
"type": "terms",
"id": "source_news_12080824",
"meta": {
"override": true
},
"name": "Close All Tabs",
"link": "https://www.kqed.org/podcasts/closealltabs",
"isLoading": false
},
"news_35082": {
"type": "terms",
"id": "news_35082",
"meta": {
"index": "terms_1716263798",
"site": "news",
"id": "35082",
"found": true
},
"relationships": {},
"name": "Close All Tabs",
"slug": "close-all-tabs",
"taxonomy": "program",
"description": null,
"featImg": null,
"headData": {
"title": "Close All Tabs | KQED News",
"description": null,
"ogTitle": null,
"ogDescription": null,
"ogImgId": null,
"twTitle": null,
"twDescription": null,
"twImgId": null
},
"ttid": 35099,
"isLoading": false,
"link": "/news/program/close-all-tabs"
},
"news_33520": {
"type": "terms",
"id": "news_33520",
"meta": {
"index": "terms_1716263798",
"site": "news",
"id": "33520",
"found": true
},
"relationships": {},
"featImg": null,
"name": "Podcast",
"description": null,
"taxonomy": "category",
"headData": {
"twImgId": null,
"twTitle": null,
"ogTitle": null,
"ogImgId": null,
"twDescription": null,
"description": null,
"title": "Podcast Archives | KQED News",
"ogDescription": null
},
"ttid": 33537,
"slug": "podcast",
"isLoading": false,
"link": "/news/category/podcast"
},
"news_25184": {
"type": "terms",
"id": "news_25184",
"meta": {
"index": "terms_1716263798",
"site": "news",
"id": "25184",
"found": true
},
"relationships": {},
"featImg": null,
"name": "AI",
"description": null,
"taxonomy": "tag",
"headData": {
"twImgId": null,
"twTitle": null,
"ogTitle": null,
"ogImgId": null,
"twDescription": null,
"description": null,
"title": "AI Archives | KQED News",
"ogDescription": null
},
"ttid": 25201,
"slug": "ai",
"isLoading": false,
"link": "/news/tag/ai"
},
"news_36279": {
"type": "terms",
"id": "news_36279",
"meta": {
"index": "terms_1716263798",
"site": "news",
"id": "36279",
"found": true
},
"relationships": {},
"name": "chatbot",
"slug": "chatbot",
"taxonomy": "tag",
"description": null,
"featImg": null,
"headData": {
"title": "chatbot | KQED News",
"description": null,
"ogTitle": null,
"ogDescription": null,
"ogImgId": null,
"twTitle": null,
"twDescription": null,
"twImgId": null
},
"ttid": 36296,
"isLoading": false,
"link": "/news/tag/chatbot"
},
"news_22973": {
"type": "terms",
"id": "news_22973",
"meta": {
"index": "terms_1716263798",
"site": "news",
"id": "22973",
"found": true
},
"relationships": {},
"featImg": null,
"name": "culture",
"description": null,
"taxonomy": "tag",
"headData": {
"twImgId": null,
"twTitle": null,
"ogTitle": null,
"ogImgId": null,
"twDescription": null,
"description": null,
"title": "culture Archives | KQED News",
"ogDescription": null
},
"ttid": 22990,
"slug": "culture",
"isLoading": false,
"link": "/news/tag/culture"
},
"news_3137": {
"type": "terms",
"id": "news_3137",
"meta": {
"index": "terms_1716263798",
"site": "news",
"id": "3137",
"found": true
},
"relationships": {},
"featImg": null,
"name": "internet",
"description": null,
"taxonomy": "tag",
"headData": {
"twImgId": null,
"twTitle": null,
"ogTitle": null,
"ogImgId": null,
"twDescription": null,
"description": null,
"title": "internet Archives | KQED News",
"ogDescription": null
},
"ttid": 3155,
"slug": "internet",
"isLoading": false,
"link": "/news/tag/internet"
},
"news_34646": {
"type": "terms",
"id": "news_34646",
"meta": {
"index": "terms_1716263798",
"site": "news",
"id": "34646",
"found": true
},
"relationships": {},
"name": "internet culture",
"slug": "internet-culture",
"taxonomy": "tag",
"description": null,
"featImg": null,
"headData": {
"title": "internet culture | KQED News",
"description": null,
"ogTitle": null,
"ogDescription": null,
"ogImgId": null,
"twTitle": null,
"twDescription": null,
"twImgId": null
},
"ttid": 34663,
"isLoading": false,
"link": "/news/tag/internet-culture"
},
"news_2109": {
"type": "terms",
"id": "news_2109",
"meta": {
"index": "terms_1716263798",
"site": "news",
"id": "2109",
"found": true
},
"relationships": {},
"featImg": null,
"name": "mental health",
"description": null,
"taxonomy": "tag",
"headData": {
"twImgId": null,
"twTitle": null,
"ogTitle": null,
"ogImgId": null,
"twDescription": null,
"description": null,
"title": "mental health Archives | KQED News",
"ogDescription": null
},
"ttid": 2124,
"slug": "mental-health",
"isLoading": false,
"link": "/news/tag/mental-health"
},
"news_20782": {
"type": "terms",
"id": "news_20782",
"meta": {
"index": "terms_1716263798",
"site": "news",
"id": "20782",
"found": true
},
"relationships": {},
"featImg": null,
"name": "therapy",
"description": null,
"taxonomy": "tag",
"headData": {
"twImgId": null,
"twTitle": null,
"ogTitle": null,
"ogImgId": null,
"twDescription": null,
"description": null,
"title": "therapy Archives | KQED News",
"ogDescription": null
},
"ttid": 20799,
"slug": "therapy",
"isLoading": false,
"link": "/news/tag/therapy"
},
"news_33732": {
"type": "terms",
"id": "news_33732",
"meta": {
"index": "terms_1716263798",
"site": "news",
"id": "33732",
"found": true
},
"relationships": {},
"featImg": null,
"name": "Technology",
"description": null,
"taxonomy": "interest",
"headData": {
"twImgId": null,
"twTitle": null,
"ogTitle": null,
"ogImgId": null,
"twDescription": null,
"description": null,
"title": "Technology Archives | KQED News",
"ogDescription": null
},
"ttid": 33749,
"slug": "technology",
"isLoading": false,
"link": "/news/interest/technology"
},
"news_28250": {
"type": "terms",
"id": "news_28250",
"meta": {
"index": "terms_1716263798",
"site": "news",
"id": "28250",
"found": true
},
"relationships": {},
"featImg": null,
"name": "Local",
"description": null,
"taxonomy": "category",
"headData": {
"twImgId": null,
"twTitle": null,
"ogTitle": null,
"ogImgId": null,
"twDescription": null,
"description": null,
"title": "Local Archives | KQED News",
"ogDescription": null
},
"ttid": 28267,
"slug": "local",
"isLoading": false,
"link": "/news/category/local"
},
"news_8": {
"type": "terms",
"id": "news_8",
"meta": {
"index": "terms_1716263798",
"site": "news",
"id": "8",
"found": true
},
"relationships": {},
"featImg": null,
"name": "News",
"description": null,
"taxonomy": "category",
"headData": {
"twImgId": null,
"twTitle": null,
"ogTitle": null,
"ogImgId": null,
"twDescription": null,
"description": null,
"title": "News Archives | KQED News",
"ogDescription": null
},
"ttid": 8,
"slug": "news",
"isLoading": false,
"link": "/news/category/news"
},
"news_248": {
"type": "terms",
"id": "news_248",
"meta": {
"index": "terms_1716263798",
"site": "news",
"id": "248",
"found": true
},
"relationships": {},
"featImg": null,
"name": "Technology",
"description": null,
"taxonomy": "category",
"headData": {
"twImgId": null,
"twTitle": null,
"ogTitle": null,
"ogImgId": null,
"twDescription": null,
"description": null,
"title": "Technology Archives | KQED News",
"ogDescription": null
},
"ttid": 256,
"slug": "technology",
"isLoading": false,
"link": "/news/category/technology"
},
"news_1386": {
"type": "terms",
"id": "news_1386",
"meta": {
"index": "terms_1716263798",
"site": "news",
"id": "1386",
"found": true
},
"relationships": {},
"featImg": null,
"name": "Bay Area",
"description": null,
"taxonomy": "tag",
"headData": {
"twImgId": null,
"twTitle": null,
"ogTitle": null,
"ogImgId": null,
"twDescription": null,
"description": null,
"title": "Bay Area Archives | KQED News",
"ogDescription": null
},
"ttid": 1398,
"slug": "bay-area",
"isLoading": false,
"link": "/news/tag/bay-area"
},
"news_3897": {
"type": "terms",
"id": "news_3897",
"meta": {
"index": "terms_1716263798",
"site": "news",
"id": "3897",
"found": true
},
"relationships": {},
"featImg": null,
"name": "Elon Musk",
"description": null,
"taxonomy": "tag",
"headData": {
"twImgId": null,
"twTitle": null,
"ogTitle": null,
"ogImgId": null,
"twDescription": null,
"description": null,
"title": "Elon Musk Archives | KQED News",
"ogDescription": null
},
"ttid": 3916,
"slug": "elon-musk",
"isLoading": false,
"link": "/news/tag/elon-musk"
},
"news_27626": {
"type": "terms",
"id": "news_27626",
"meta": {
"index": "terms_1716263798",
"site": "news",
"id": "27626",
"found": true
},
"relationships": {},
"featImg": null,
"name": "featured-news",
"description": null,
"taxonomy": "tag",
"headData": {
"twImgId": null,
"twTitle": null,
"ogTitle": null,
"ogImgId": null,
"twDescription": null,
"description": null,
"title": "featured-news Archives | KQED News",
"ogDescription": null
},
"ttid": 27643,
"slug": "featured-news",
"isLoading": false,
"link": "/news/tag/featured-news"
},
"news_34054": {
"type": "terms",
"id": "news_34054",
"meta": {
"index": "terms_1716263798",
"site": "news",
"id": "34054",
"found": true
},
"relationships": {},
"featImg": null,
"name": "oakland",
"description": null,
"taxonomy": "tag",
"headData": {
"twImgId": null,
"twTitle": null,
"ogTitle": null,
"ogImgId": null,
"twDescription": null,
"description": null,
"title": "oakland Archives | KQED News",
"ogDescription": null
},
"ttid": 34071,
"slug": "oakland",
"isLoading": false,
"link": "/news/tag/oakland"
},
"news_33542": {
"type": "terms",
"id": "news_33542",
"meta": {
"index": "terms_1716263798",
"site": "news",
"id": "33542",
"found": true
},
"relationships": {},
"featImg": null,
"name": "OpenAI",
"description": null,
"taxonomy": "tag",
"headData": {
"twImgId": null,
"twTitle": null,
"ogTitle": null,
"ogImgId": null,
"twDescription": null,
"description": null,
"title": "OpenAI Archives | KQED News",
"ogDescription": null
},
"ttid": 33559,
"slug": "openai",
"isLoading": false,
"link": "/news/tag/openai"
},
"news_33543": {
"type": "terms",
"id": "news_33543",
"meta": {
"index": "terms_1716263798",
"site": "news",
"id": "33543",
"found": true
},
"relationships": {},
"name": "Sam Altman",
"slug": "sam-altman",
"taxonomy": "tag",
"description": null,
"featImg": null,
"headData": {
"title": "Sam Altman | KQED News",
"description": null,
"ogTitle": null,
"ogDescription": null,
"ogImgId": null,
"twTitle": null,
"twDescription": null,
"twImgId": null,
"metaRobotsNoIndex": "noindex"
},
"ttid": 33560,
"isLoading": false,
"link": "/news/tag/sam-altman"
},
"news_34586": {
"type": "terms",
"id": "news_34586",
"meta": {
"index": "terms_1716263798",
"site": "news",
"id": "34586",
"found": true
},
"relationships": {},
"name": "Silicon Valley",
"slug": "silicon-valley",
"taxonomy": "tag",
"description": null,
"featImg": null,
"headData": {
"title": "Silicon Valley | KQED News",
"description": null,
"ogTitle": null,
"ogDescription": null,
"ogImgId": null,
"twTitle": null,
"twDescription": null,
"twImgId": null
},
"ttid": 34603,
"isLoading": false,
"link": "/news/tag/silicon-valley"
},
"news_33733": {
"type": "terms",
"id": "news_33733",
"meta": {
"index": "terms_1716263798",
"site": "news",
"id": "33733",
"found": true
},
"relationships": {},
"featImg": null,
"name": "News",
"description": null,
"taxonomy": "interest",
"headData": {
"twImgId": null,
"twTitle": null,
"ogTitle": null,
"ogImgId": null,
"twDescription": null,
"description": null,
"title": "News Archives | KQED News",
"ogDescription": null
},
"ttid": 33750,
"slug": "news",
"isLoading": false,
"link": "/news/interest/news"
},
"news_33730": {
"type": "terms",
"id": "news_33730",
"meta": {
"index": "terms_1716263798",
"site": "news",
"id": "33730",
"found": true
},
"relationships": {},
"featImg": null,
"name": "Oakland",
"description": null,
"taxonomy": "interest",
"headData": {
"twImgId": null,
"twTitle": null,
"ogTitle": null,
"ogImgId": null,
"twDescription": null,
"description": null,
"title": "Oakland Archives | KQED News",
"ogDescription": null
},
"ttid": 33747,
"slug": "oakland",
"isLoading": false,
"link": "/news/interest/oakland"
},
"news_31795": {
"type": "terms",
"id": "news_31795",
"meta": {
"index": "terms_1716263798",
"site": "news",
"id": "31795",
"found": true
},
"relationships": {},
"featImg": null,
"name": "California",
"description": null,
"taxonomy": "category",
"headData": {
"twImgId": null,
"twTitle": null,
"ogTitle": null,
"ogImgId": null,
"twDescription": null,
"description": null,
"title": "California Archives | KQED News",
"ogDescription": null
},
"ttid": 31812,
"slug": "california",
"isLoading": false,
"link": "/news/category/california"
},
"news_6188": {
"type": "terms",
"id": "news_6188",
"meta": {
"index": "terms_1716263798",
"site": "news",
"id": "6188",
"found": true
},
"relationships": {},
"featImg": null,
"name": "Law and Justice",
"description": null,
"taxonomy": "category",
"headData": {
"twImgId": null,
"twTitle": null,
"ogTitle": null,
"ogImgId": null,
"twDescription": null,
"description": null,
"title": "Law and Justice Archives | KQED News",
"ogDescription": null
},
"ttid": 6212,
"slug": "law-and-justice",
"isLoading": false,
"link": "/news/category/law-and-justice"
},
"news_34755": {
"type": "terms",
"id": "news_34755",
"meta": {
"index": "terms_1716263798",
"site": "news",
"id": "34755",
"found": true
},
"relationships": {},
"name": "artificial intelligence",
"slug": "artificial-intelligence",
"taxonomy": "tag",
"description": null,
"featImg": null,
"headData": {
"title": "artificial intelligence | KQED News",
"description": null,
"ogTitle": null,
"ogDescription": null,
"ogImgId": null,
"twTitle": null,
"twDescription": null,
"twImgId": null
},
"ttid": 34772,
"isLoading": false,
"link": "/news/tag/artificial-intelligence"
},
"news_32668": {
"type": "terms",
"id": "news_32668",
"meta": {
"index": "terms_1716263798",
"site": "news",
"id": "32668",
"found": true
},
"relationships": {},
"featImg": null,
"name": "ChatGPT",
"description": null,
"taxonomy": "tag",
"headData": {
"twImgId": null,
"twTitle": null,
"ogTitle": null,
"ogImgId": null,
"twDescription": null,
"description": null,
"title": "ChatGPT Archives | KQED News",
"ogDescription": null
},
"ttid": 32685,
"slug": "chatgpt",
"isLoading": false,
"link": "/news/tag/chatgpt"
},
"news_19954": {
"type": "terms",
"id": "news_19954",
"meta": {
"index": "terms_1716263798",
"site": "news",
"id": "19954",
"found": true
},
"relationships": {},
"featImg": null,
"name": "Law and Justice",
"description": null,
"taxonomy": "tag",
"headData": {
"twImgId": null,
"twTitle": null,
"ogTitle": null,
"ogImgId": null,
"twDescription": null,
"description": null,
"title": "Law and Justice Archives | KQED News",
"ogDescription": null
},
"ttid": 19971,
"slug": "law-and-justice",
"isLoading": false,
"link": "/news/tag/law-and-justice"
},
"news_21891": {
"type": "terms",
"id": "news_21891",
"meta": {
"index": "terms_1716263798",
"site": "news",
"id": "21891",
"found": true
},
"relationships": {},
"featImg": null,
"name": "lawsuits",
"description": null,
"taxonomy": "tag",
"headData": {
"twImgId": null,
"twTitle": null,
"ogTitle": null,
"ogImgId": null,
"twDescription": null,
"description": null,
"title": "lawsuits Archives | KQED News",
"ogDescription": null
},
"ttid": 21908,
"slug": "lawsuits",
"isLoading": false,
"link": "/news/tag/lawsuits"
},
"news_57": {
"type": "terms",
"id": "news_57",
"meta": {
"index": "terms_1716263798",
"site": "news",
"id": "57",
"found": true
},
"relationships": {},
"featImg": null,
"name": "Tesla",
"description": null,
"taxonomy": "tag",
"headData": {
"twImgId": null,
"twTitle": null,
"ogTitle": null,
"ogImgId": null,
"twDescription": null,
"description": null,
"title": "Tesla Archives | KQED News",
"ogDescription": null
},
"ttid": 57,
"slug": "tesla",
"isLoading": false,
"link": "/news/tag/tesla"
},
"news_17619": {
"type": "terms",
"id": "news_17619",
"meta": {
"index": "terms_1716263798",
"site": "news",
"id": "17619",
"found": true
},
"relationships": {},
"featImg": null,
"name": "cybersecurity",
"description": null,
"taxonomy": "tag",
"headData": {
"twImgId": null,
"twTitle": null,
"ogTitle": null,
"ogImgId": null,
"twDescription": null,
"description": null,
"title": "cybersecurity Archives | KQED News",
"ogDescription": null
},
"ttid": 17653,
"slug": "cybersecurity",
"isLoading": false,
"link": "/news/tag/cybersecurity"
},
"news_22844": {
"type": "terms",
"id": "news_22844",
"meta": {
"index": "terms_1716263798",
"site": "news",
"id": "22844",
"found": true
},
"relationships": {},
"featImg": null,
"name": "Data Privacy",
"description": null,
"taxonomy": "tag",
"headData": {
"twImgId": null,
"twTitle": null,
"ogTitle": null,
"ogImgId": null,
"twDescription": null,
"description": null,
"title": "Data Privacy Archives | KQED News",
"ogDescription": null
},
"ttid": 22861,
"slug": "data-privacy",
"isLoading": false,
"link": "/news/tag/data-privacy"
},
"news_2414": {
"type": "terms",
"id": "news_2414",
"meta": {
"index": "terms_1716263798",
"site": "news",
"id": "2414",
"found": true
},
"relationships": {},
"featImg": null,
"name": "Internet Privacy",
"description": null,
"taxonomy": "tag",
"headData": {
"twImgId": null,
"twTitle": null,
"ogTitle": null,
"ogImgId": null,
"twDescription": null,
"description": null,
"title": "Internet Privacy Archives | KQED News",
"ogDescription": null
},
"ttid": 2429,
"slug": "internet-privacy",
"isLoading": false,
"link": "/news/tag/internet-privacy"
},
"news_2125": {
"type": "terms",
"id": "news_2125",
"meta": {
"index": "terms_1716263798",
"site": "news",
"id": "2125",
"found": true
},
"relationships": {},
"featImg": null,
"name": "online privacy",
"description": null,
"taxonomy": "tag",
"headData": {
"twImgId": null,
"twTitle": null,
"ogTitle": null,
"ogImgId": null,
"twDescription": null,
"description": null,
"title": "online privacy Archives | KQED News",
"ogDescription": null
},
"ttid": 2140,
"slug": "online-privacy",
"isLoading": false,
"link": "/news/tag/online-privacy"
},
"news_1859": {
"type": "terms",
"id": "news_1859",
"meta": {
"index": "terms_1716263798",
"site": "news",
"id": "1859",
"found": true
},
"relationships": {},
"featImg": null,
"name": "privacy",
"description": null,
"taxonomy": "tag",
"headData": {
"twImgId": null,
"twTitle": null,
"ogTitle": null,
"ogImgId": null,
"twDescription": null,
"description": null,
"title": "privacy Archives | KQED News",
"ogDescription": null
},
"ttid": 1874,
"slug": "privacy",
"isLoading": false,
"link": "/news/tag/privacy"
},
"news_18352": {
"type": "terms",
"id": "news_18352",
"meta": {
"index": "terms_1716263798",
"site": "news",
"id": "18352",
"found": true
},
"relationships": {},
"featImg": null,
"name": "East Bay",
"description": null,
"taxonomy": "tag",
"headData": {
"twImgId": null,
"twTitle": null,
"ogTitle": null,
"ogImgId": null,
"twDescription": null,
"description": null,
"title": "East Bay Archives | KQED News",
"ogDescription": null
},
"ttid": 18386,
"slug": "east-bay",
"isLoading": false,
"link": "/news/tag/east-bay"
},
"news_18538": {
"type": "terms",
"id": "news_18538",
"meta": {
"index": "terms_1716263798",
"site": "news",
"id": "18538",
"found": true
},
"relationships": {},
"featImg": null,
"name": "California",
"description": null,
"taxonomy": "tag",
"headData": {
"twImgId": null,
"twTitle": null,
"ogTitle": null,
"ogImgId": null,
"twDescription": null,
"description": null,
"title": "California Archives | KQED News",
"ogDescription": null
},
"ttid": 31,
"slug": "california",
"isLoading": false,
"link": "/news/tag/california"
},
"news_23052": {
"type": "terms",
"id": "news_23052",
"meta": {
"index": "terms_1716263798",
"site": "news",
"id": "23052",
"found": true
},
"relationships": {},
"featImg": null,
"name": "fraud",
"description": null,
"taxonomy": "tag",
"headData": {
"twImgId": null,
"twTitle": null,
"ogTitle": null,
"ogImgId": null,
"twDescription": null,
"description": null,
"title": "fraud Archives | KQED News",
"ogDescription": null
},
"ttid": 23069,
"slug": "fraud",
"isLoading": false,
"link": "/news/tag/fraud"
},
"news_38": {
"type": "terms",
"id": "news_38",
"meta": {
"index": "terms_1716263798",
"site": "news",
"id": "38",
"found": true
},
"relationships": {},
"featImg": null,
"name": "San Francisco",
"description": null,
"taxonomy": "tag",
"headData": {
"twImgId": null,
"twTitle": null,
"ogTitle": null,
"ogImgId": null,
"twDescription": null,
"description": null,
"title": "San Francisco Archives | KQED News",
"ogDescription": null
},
"ttid": 58,
"slug": "san-francisco",
"isLoading": false,
"link": "/news/tag/san-francisco"
},
"news_33738": {
"type": "terms",
"id": "news_33738",
"meta": {
"index": "terms_1716263798",
"site": "news",
"id": "33738",
"found": true
},
"relationships": {},
"featImg": null,
"name": "California",
"description": null,
"taxonomy": "interest",
"headData": {
"twImgId": null,
"twTitle": null,
"ogTitle": null,
"ogImgId": null,
"twDescription": null,
"description": null,
"title": "California Archives | KQED News",
"ogDescription": null
},
"ttid": 33755,
"slug": "california",
"isLoading": false,
"link": "/news/interest/california"
},
"news_18540": {
"type": "terms",
"id": "news_18540",
"meta": {
"index": "terms_1716263798",
"site": "news",
"id": "18540",
"found": true
},
"relationships": {},
"featImg": null,
"name": "Education",
"description": null,
"taxonomy": "category",
"headData": {
"twImgId": null,
"twTitle": null,
"ogTitle": null,
"ogImgId": null,
"twDescription": null,
"description": null,
"title": "Education Archives | KQED News",
"ogDescription": null
},
"ttid": 2595,
"slug": "education",
"isLoading": false,
"link": "/news/category/education"
},
"news_457": {
"type": "terms",
"id": "news_457",
"meta": {
"index": "terms_1716263798",
"site": "news",
"id": "457",
"found": true
},
"relationships": {},
"featImg": null,
"name": "Health",
"description": null,
"taxonomy": "category",
"headData": {
"twImgId": null,
"twTitle": null,
"ogTitle": null,
"ogImgId": null,
"twDescription": null,
"description": null,
"title": "Health Archives | KQED News",
"ogDescription": null
},
"ttid": 16998,
"slug": "health",
"isLoading": false,
"link": "/news/category/health"
},
"news_129": {
"type": "terms",
"id": "news_129",
"meta": {
"index": "terms_1716263798",
"site": "news",
"id": "129",
"found": true
},
"relationships": {},
"featImg": null,
"name": "Berkeley",
"description": null,
"taxonomy": "tag",
"headData": {
"twImgId": null,
"twTitle": null,
"ogTitle": null,
"ogImgId": null,
"twDescription": null,
"description": null,
"title": "Berkeley Archives | KQED News",
"ogDescription": null
},
"ttid": 133,
"slug": "berkeley",
"isLoading": false,
"link": "/news/tag/berkeley"
},
"news_35288": {
"type": "terms",
"id": "news_35288",
"meta": {
"index": "terms_1716263798",
"site": "news",
"id": "35288",
"found": true
},
"relationships": {},
"name": "Cal students",
"slug": "cal-students",
"taxonomy": "tag",
"description": null,
"featImg": null,
"headData": {
"title": "Cal students | KQED News",
"description": null,
"ogTitle": null,
"ogDescription": null,
"ogImgId": null,
"twTitle": null,
"twDescription": null,
"twImgId": null
},
"ttid": 35305,
"isLoading": false,
"link": "/news/tag/cal-students"
},
"news_36084": {
"type": "terms",
"id": "news_36084",
"meta": {
"index": "terms_1716263798",
"site": "news",
"id": "36084",
"found": true
},
"relationships": {},
"name": "college students",
"slug": "college-students",
"taxonomy": "tag",
"description": null,
"featImg": null,
"headData": {
"title": "college students | KQED News",
"description": null,
"ogTitle": null,
"ogDescription": null,
"ogImgId": null,
"twTitle": null,
"twDescription": null,
"twImgId": null
},
"ttid": 36101,
"isLoading": false,
"link": "/news/tag/college-students"
},
"news_20013": {
"type": "terms",
"id": "news_20013",
"meta": {
"index": "terms_1716263798",
"site": "news",
"id": "20013",
"found": true
},
"relationships": {},
"featImg": null,
"name": "education",
"description": null,
"taxonomy": "tag",
"headData": {
"twImgId": null,
"twTitle": null,
"ogTitle": null,
"ogImgId": null,
"twDescription": null,
"description": null,
"title": "education Archives | KQED News",
"ogDescription": null
},
"ttid": 20030,
"slug": "education",
"isLoading": false,
"link": "/news/tag/education"
},
"news_18543": {
"type": "terms",
"id": "news_18543",
"meta": {
"index": "terms_1716263798",
"site": "news",
"id": "18543",
"found": true
},
"relationships": {},
"featImg": null,
"name": "Health",
"description": null,
"taxonomy": "tag",
"headData": {
"twImgId": null,
"twTitle": null,
"ogTitle": null,
"ogImgId": null,
"twDescription": null,
"description": null,
"title": "Health Archives | KQED News",
"ogDescription": null
},
"ttid": 466,
"slug": "health",
"isLoading": false,
"link": "/news/tag/health"
},
"news_4950": {
"type": "terms",
"id": "news_4950",
"meta": {
"index": "terms_1716263798",
"site": "news",
"id": "4950",
"found": true
},
"relationships": {},
"featImg": null,
"name": "smartphones",
"description": null,
"taxonomy": "tag",
"headData": {
"twImgId": null,
"twTitle": null,
"ogTitle": null,
"ogImgId": null,
"twDescription": null,
"description": null,
"title": "smartphones Archives | KQED News",
"ogDescription": null
},
"ttid": 4969,
"slug": "smartphones",
"isLoading": false,
"link": "/news/tag/smartphones"
},
"news_1089": {
"type": "terms",
"id": "news_1089",
"meta": {
"index": "terms_1716263798",
"site": "news",
"id": "1089",
"found": true
},
"relationships": {},
"featImg": null,
"name": "social media",
"description": null,
"taxonomy": "tag",
"headData": {
"twImgId": null,
"twTitle": null,
"ogTitle": null,
"ogImgId": null,
"twDescription": null,
"description": null,
"title": "social media Archives | KQED News",
"ogDescription": null
},
"ttid": 1100,
"slug": "social-media",
"isLoading": false,
"link": "/news/tag/social-media"
},
"news_17597": {
"type": "terms",
"id": "news_17597",
"meta": {
"index": "terms_1716263798",
"site": "news",
"id": "17597",
"found": true
},
"relationships": {},
"featImg": null,
"name": "UC Berkeley",
"description": null,
"taxonomy": "tag",
"headData": {
"twImgId": null,
"twTitle": null,
"ogTitle": null,
"ogImgId": null,
"twDescription": null,
"description": null,
"title": "UC Berkeley Archives | KQED News",
"ogDescription": null
},
"ttid": 17631,
"slug": "uc-berkeley",
"isLoading": false,
"link": "/news/tag/uc-berkeley"
},
"news_33742": {
"type": "terms",
"id": "news_33742",
"meta": {
"index": "terms_1716263798",
"site": "news",
"id": "33742",
"found": true
},
"relationships": {},
"featImg": null,
"name": "Berkeley",
"description": null,
"taxonomy": "interest",
"headData": {
"twImgId": null,
"twTitle": null,
"ogTitle": null,
"ogImgId": null,
"twDescription": null,
"description": null,
"title": "Berkeley Archives | KQED News",
"ogDescription": null
},
"ttid": 33759,
"slug": "berkeley",
"isLoading": false,
"link": "/news/interest/berkeley"
},
"news_33746": {
"type": "terms",
"id": "news_33746",
"meta": {
"index": "terms_1716263798",
"site": "news",
"id": "33746",
"found": true
},
"relationships": {},
"featImg": null,
"name": "Education",
"description": null,
"taxonomy": "interest",
"headData": {
"twImgId": null,
"twTitle": null,
"ogTitle": null,
"ogImgId": null,
"twDescription": null,
"description": null,
"title": "Education Archives | KQED News",
"ogDescription": null
},
"ttid": 33763,
"slug": "education",
"isLoading": false,
"link": "/news/interest/education"
},
"news_32664": {
"type": "terms",
"id": "news_32664",
"meta": {
"index": "terms_1716263798",
"site": "news",
"id": "32664",
"found": true
},
"relationships": {},
"name": "AI software",
"slug": "ai-software",
"taxonomy": "tag",
"description": null,
"featImg": null,
"headData": {
"title": "AI software | KQED News",
"description": null,
"ogTitle": null,
"ogDescription": null,
"ogImgId": null,
"twTitle": null,
"twDescription": null,
"twImgId": null
},
"ttid": 32681,
"isLoading": false,
"link": "/news/tag/ai-software"
},
"news_1323": {
"type": "terms",
"id": "news_1323",
"meta": {
"index": "terms_1716263798",
"site": "news",
"id": "1323",
"found": true
},
"relationships": {},
"featImg": null,
"name": "Donald Trump",
"description": null,
"taxonomy": "tag",
"headData": {
"twImgId": null,
"twTitle": null,
"ogTitle": null,
"ogImgId": null,
"twDescription": null,
"description": null,
"title": "Donald Trump Archives | KQED News",
"ogDescription": null
},
"ttid": 1335,
"slug": "donald-trump",
"isLoading": false,
"link": "/news/tag/donald-trump"
},
"news_15": {
"type": "terms",
"id": "news_15",
"meta": {
"index": "terms_1716263798",
"site": "news",
"id": "15",
"found": true
},
"relationships": {},
"featImg": null,
"name": "Forum",
"description": null,
"taxonomy": "tag",
"headData": {
"twImgId": null,
"twTitle": null,
"ogTitle": null,
"ogImgId": null,
"twDescription": null,
"description": null,
"title": "Forum Archives | KQED News",
"ogDescription": null
},
"ttid": 15,
"slug": "forum",
"isLoading": false,
"link": "/news/tag/forum"
},
"news_20058": {
"type": "terms",
"id": "news_20058",
"meta": {
"index": "terms_1716263798",
"site": "news",
"id": "20058",
"found": true
},
"relationships": {},
"featImg": null,
"name": "U.S. Department of Justice",
"description": null,
"taxonomy": "tag",
"headData": {
"twImgId": null,
"twTitle": null,
"ogTitle": null,
"ogImgId": null,
"twDescription": null,
"description": null,
"title": "U.S. Department of Justice Archives | KQED News",
"ogDescription": null
},
"ttid": 20075,
"slug": "u-s-department-of-justice",
"isLoading": false,
"link": "/news/tag/u-s-department-of-justice"
},
"news_21417": {
"type": "terms",
"id": "news_21417",
"meta": {
"index": "terms_1716263798",
"site": "news",
"id": "21417",
"found": true
},
"relationships": {},
"featImg": null,
"name": "U.S. Military",
"description": null,
"taxonomy": "tag",
"headData": {
"twImgId": null,
"twTitle": null,
"ogTitle": null,
"ogImgId": null,
"twDescription": null,
"description": null,
"title": "U.S. Military Archives | KQED News",
"ogDescription": null
},
"ttid": 21434,
"slug": "u-s-military",
"isLoading": false,
"link": "/news/tag/u-s-military"
},
"news_20526": {
"type": "terms",
"id": "news_20526",
"meta": {
"index": "terms_1716263798",
"site": "news",
"id": "20526",
"found": true
},
"relationships": {},
"featImg": null,
"name": "H-1B visas",
"description": null,
"taxonomy": "tag",
"headData": {
"twImgId": null,
"twTitle": null,
"ogTitle": null,
"ogImgId": null,
"twDescription": null,
"description": null,
"title": "H-1B visas Archives | KQED News",
"ogDescription": null
},
"ttid": 20543,
"slug": "h-1b-visas",
"isLoading": false,
"link": "/news/tag/h-1b-visas"
},
"news_20611": {
"type": "terms",
"id": "news_20611",
"meta": {
"index": "terms_1716263798",
"site": "news",
"id": "20611",
"found": true
},
"relationships": {},
"featImg": null,
"name": "immigrant",
"description": null,
"taxonomy": "tag",
"headData": {
"twImgId": null,
"twTitle": null,
"ogTitle": null,
"ogImgId": null,
"twDescription": null,
"description": null,
"title": "immigrant Archives | KQED News",
"ogDescription": null
},
"ttid": 20628,
"slug": "immigrant",
"isLoading": false,
"link": "/news/tag/immigrant"
},
"news_5702": {
"type": "terms",
"id": "news_5702",
"meta": {
"index": "terms_1716263798",
"site": "news",
"id": "5702",
"found": true
},
"relationships": {},
"featImg": null,
"name": "video games",
"description": null,
"taxonomy": "tag",
"headData": {
"twImgId": null,
"twTitle": null,
"ogTitle": null,
"ogImgId": null,
"twDescription": null,
"description": null,
"title": "video games Archives | KQED News",
"ogDescription": null
},
"ttid": 5726,
"slug": "video-games",
"isLoading": false,
"link": "/news/tag/video-games"
},
"news_35248": {
"type": "terms",
"id": "news_35248",
"meta": {
"index": "terms_1716263798",
"site": "news",
"id": "35248",
"found": true
},
"relationships": {},
"name": "visa",
"slug": "visa",
"taxonomy": "tag",
"description": null,
"featImg": null,
"headData": {
"title": "visa | KQED News",
"description": null,
"ogTitle": null,
"ogDescription": null,
"ogImgId": null,
"twTitle": null,
"twDescription": null,
"twImgId": null
},
"ttid": 35265,
"isLoading": false,
"link": "/news/tag/visa"
}
},
"userAgentReducer": {
"userAgent": "Mozilla/5.0 AppleWebKit/537.36 (KHTML, like Gecko; compatible; ClaudeBot/1.0; +claudebot@anthropic.com)",
"isBot": true
},
"userPermissionsReducer": {
"wpLoggedIn": false
},
"localStorageReducer": {},
"browserHistoryReducer": [],
"eventsReducer": {},
"fssReducer": {},
"tvDailyScheduleReducer": {},
"tvWeeklyScheduleReducer": {},
"tvPrimetimeScheduleReducer": {},
"tvMonthlyScheduleReducer": {},
"userAccountReducer": {
"user": {
"email": null,
"emailStatus": "EMAIL_UNVALIDATED",
"loggedStatus": "LOGGED_OUT",
"loggingChecked": false,
"articles": [],
"firstName": null,
"lastName": null,
"phoneNumber": null,
"fetchingMembership": false,
"membershipError": false,
"memberships": [
{
"id": null,
"startDate": null,
"firstName": null,
"lastName": null,
"familyNumber": null,
"memberNumber": null,
"memberSince": null,
"expirationDate": null,
"pfsEligible": false,
"isSustaining": false,
"membershipLevel": "Prospect",
"membershipStatus": "Non Member",
"lastGiftDate": null,
"renewalDate": null,
"lastDonationAmount": null
}
]
},
"authModal": {
"isOpen": false,
"view": "LANDING_VIEW"
},
"error": null
},
"youthMediaReducer": {},
"checkPleaseReducer": {
"filterData": {
"region": {
"key": "Restaurant Region",
"filters": [
"Any Region"
]
},
"cuisine": {
"key": "Restaurant Cuisine",
"filters": [
"Any Cuisine"
]
}
},
"restaurantDataById": {},
"restaurantIdsSorted": [],
"error": null
},
"location": {
"pathname": "/news/tag/technology",
"previousPathname": "/"
}
}