Lawyers for Elon Musk and Sam Altman Make Their Final Case in OpenAI Trial
How an OnlyFans Model and a Cosplayer Are Fighting Nonconsensual Deepfake Porn
Sam Altman Defends Himself From Elon Musk’s Accusations in OpenAI Trial
Former OpenAI Exec Calls Decision to Remove Sam Altman a ‘Hail Mary’ During Musk Trial
Inside Elon Musk and Sam Altman's Battle Over OpenAI
OpenAI Back in Court Over Canada School Shooter’s Use of ChatGPT
Elon Musk Says Sam Altman Tricked Him Into Funding OpenAI
Elon Musk Takes Aim at OpenAI as Trial Begins: ‘It’s Not OK to Steal a Charity’
How to Unscramble an Omelet in Silicon Valley: The Musk v. Altman Trial That Will Try
Player sponsored by
window.__IS_SSR__=true
window.__INITIAL_STATE__={
"attachmentsReducer": {
"audio_0": {
"type": "attachments",
"id": "audio_0",
"imgSizes": {
"kqedFullSize": {
"file": "https://ww2.kqed.org/news/wp-content/themes/KQED-unified/img/audio_bgs/background0.jpg"
}
}
},
"audio_1": {
"type": "attachments",
"id": "audio_1",
"imgSizes": {
"kqedFullSize": {
"file": "https://ww2.kqed.org/news/wp-content/themes/KQED-unified/img/audio_bgs/background1.jpg"
}
}
},
"audio_2": {
"type": "attachments",
"id": "audio_2",
"imgSizes": {
"kqedFullSize": {
"file": "https://ww2.kqed.org/news/wp-content/themes/KQED-unified/img/audio_bgs/background2.jpg"
}
}
},
"audio_3": {
"type": "attachments",
"id": "audio_3",
"imgSizes": {
"kqedFullSize": {
"file": "https://ww2.kqed.org/news/wp-content/themes/KQED-unified/img/audio_bgs/background3.jpg"
}
}
},
"audio_4": {
"type": "attachments",
"id": "audio_4",
"imgSizes": {
"kqedFullSize": {
"file": "https://ww2.kqed.org/news/wp-content/themes/KQED-unified/img/audio_bgs/background4.jpg"
}
}
},
"placeholder": {
"type": "attachments",
"id": "placeholder",
"imgSizes": {
"thumbnail": {
"file": "https://cdn.kqed.org/wp-content/uploads/2024/12/KQED-Default-Image-816638274-2000x1333-1-160x107.jpg",
"width": 160,
"height": 107,
"mimeType": "image/jpeg"
},
"medium": {
"file": "https://cdn.kqed.org/wp-content/uploads/2024/12/KQED-Default-Image-816638274-2000x1333-1-800x533.jpg",
"width": 800,
"height": 533,
"mimeType": "image/jpeg"
},
"medium_large": {
"file": "https://cdn.kqed.org/wp-content/uploads/2024/12/KQED-Default-Image-816638274-2000x1333-1-768x512.jpg",
"width": 768,
"height": 512,
"mimeType": "image/jpeg"
},
"large": {
"file": "https://cdn.kqed.org/wp-content/uploads/2024/12/KQED-Default-Image-816638274-2000x1333-1-1020x680.jpg",
"width": 1020,
"height": 680,
"mimeType": "image/jpeg"
},
"1536x1536": {
"file": "https://cdn.kqed.org/wp-content/uploads/2024/12/KQED-Default-Image-816638274-2000x1333-1-1536x1024.jpg",
"width": 1536,
"height": 1024,
"mimeType": "image/jpeg"
},
"fd-lrg": {
"file": "https://cdn.kqed.org/wp-content/uploads/2024/12/KQED-Default-Image-816638274-2000x1333-1-1536x1024.jpg",
"width": 1536,
"height": 1024,
"mimeType": "image/jpeg"
},
"fd-med": {
"file": "https://cdn.kqed.org/wp-content/uploads/2024/12/KQED-Default-Image-816638274-2000x1333-1-1020x680.jpg",
"width": 1020,
"height": 680,
"mimeType": "image/jpeg"
},
"fd-sm": {
"file": "https://cdn.kqed.org/wp-content/uploads/2024/12/KQED-Default-Image-816638274-2000x1333-1-800x533.jpg",
"width": 800,
"height": 533,
"mimeType": "image/jpeg"
},
"post-thumbnail": {
"file": "https://cdn.kqed.org/wp-content/uploads/2024/12/KQED-Default-Image-816638274-2000x1333-1-672x372.jpg",
"width": 672,
"height": 372,
"mimeType": "image/jpeg"
},
"twentyfourteen-full-width": {
"file": "https://cdn.kqed.org/wp-content/uploads/2024/12/KQED-Default-Image-816638274-2000x1333-1-1038x576.jpg",
"width": 1038,
"height": 576,
"mimeType": "image/jpeg"
},
"xxsmall": {
"file": "https://cdn.kqed.org/wp-content/uploads/2024/12/KQED-Default-Image-816638274-2000x1333-1-160x107.jpg",
"width": 160,
"height": 107,
"mimeType": "image/jpeg"
},
"xsmall": {
"file": "https://cdn.kqed.org/wp-content/uploads/2024/12/KQED-Default-Image-816638274-2000x1333-1-672x372.jpg",
"width": 672,
"height": 372,
"mimeType": "image/jpeg"
},
"small": {
"file": "https://cdn.kqed.org/wp-content/uploads/2024/12/KQED-Default-Image-816638274-2000x1333-1-672x372.jpg",
"width": 672,
"height": 372,
"mimeType": "image/jpeg"
},
"xlarge": {
"file": "https://cdn.kqed.org/wp-content/uploads/2024/12/KQED-Default-Image-816638274-2000x1333-1-1020x680.jpg",
"width": 1020,
"height": 680,
"mimeType": "image/jpeg"
},
"full-width": {
"file": "https://cdn.kqed.org/wp-content/uploads/2024/12/KQED-Default-Image-816638274-2000x1333-1-1920x1280.jpg",
"width": 1920,
"height": 1280,
"mimeType": "image/jpeg"
},
"guest-author-32": {
"file": "https://cdn.kqed.org/wp-content/uploads/2025/01/KQED-Default-Image-816638274-1333x1333-1-160x160.jpg",
"width": 32,
"height": 32,
"mimeType": "image/jpeg"
},
"guest-author-50": {
"file": "https://cdn.kqed.org/wp-content/uploads/2025/01/KQED-Default-Image-816638274-1333x1333-1-160x160.jpg",
"width": 50,
"height": 50,
"mimeType": "image/jpeg"
},
"guest-author-64": {
"file": "https://cdn.kqed.org/wp-content/uploads/2025/01/KQED-Default-Image-816638274-1333x1333-1-160x160.jpg",
"width": 64,
"height": 64,
"mimeType": "image/jpeg"
},
"guest-author-96": {
"file": "https://cdn.kqed.org/wp-content/uploads/2025/01/KQED-Default-Image-816638274-1333x1333-1-160x160.jpg",
"width": 96,
"height": 96,
"mimeType": "image/jpeg"
},
"guest-author-128": {
"file": "https://cdn.kqed.org/wp-content/uploads/2025/01/KQED-Default-Image-816638274-1333x1333-1-160x160.jpg",
"width": 128,
"height": 128,
"mimeType": "image/jpeg"
},
"detail": {
"file": "https://cdn.kqed.org/wp-content/uploads/2025/01/KQED-Default-Image-816638274-1333x1333-1-160x160.jpg",
"width": 160,
"height": 160,
"mimeType": "image/jpeg"
},
"kqedFullSize": {
"file": "https://cdn.kqed.org/wp-content/uploads/2024/12/KQED-Default-Image-816638274-2000x1333-1.jpg",
"width": 2000,
"height": 1333
}
}
},
"news_12083803": {
"type": "attachments",
"id": "news_12083803",
"meta": {
"index": "attachments_1716263798",
"site": "news",
"id": "12083803",
"found": true
},
"title": "Elon Musk v. OpenAI Trial Continues In California",
"publishDate": 1778804683,
"status": "inherit",
"parent": 12083612,
"modified": 1778805456,
"caption": "OpenAI's attorney William Savitt makes remarks during a press conference outside the courthouse at the Ronald V. Dellums Federal Building on May 6, 2026 in Oakland, California. An Oakland judge and jury will decide whether the company behind ChatGPT betrayed its mission of developing a safer, less risky AI. ",
"credit": "Benjamin Fanjoy/Getty Images",
"altTag": null,
"description": null,
"imgSizes": {
"thumbnail": {
"file": "https://cdn.kqed.org/wp-content/uploads/sites/10/2026/05/OpenAILawyerGetty-160x107.jpg",
"width": 160,
"height": 107,
"mimeType": "image/jpeg"
},
"1536x1536": {
"file": "https://cdn.kqed.org/wp-content/uploads/sites/10/2026/05/OpenAILawyerGetty-1536x1024.jpg",
"width": 1536,
"height": 1024,
"mimeType": "image/jpeg"
},
"post-thumbnail": {
"file": "https://cdn.kqed.org/wp-content/uploads/sites/10/2026/05/OpenAILawyerGetty-672x372.jpg",
"width": 672,
"height": 372,
"mimeType": "image/jpeg"
},
"twentyfourteen-full-width": {
"file": "https://cdn.kqed.org/wp-content/uploads/sites/10/2026/05/OpenAILawyerGetty-1038x576.jpg",
"width": 1038,
"height": 576,
"mimeType": "image/jpeg"
},
"npr-cds-wide": {
"file": "https://cdn.kqed.org/wp-content/uploads/sites/10/2026/05/OpenAILawyerGetty-1200x675.jpg",
"width": 1200,
"height": 675,
"mimeType": "image/jpeg"
},
"npr-cds-square": {
"file": "https://cdn.kqed.org/wp-content/uploads/sites/10/2026/05/OpenAILawyerGetty-600x600.jpg",
"width": 600,
"height": 600,
"mimeType": "image/jpeg"
},
"kqedFullSize": {
"file": "https://cdn.kqed.org/wp-content/uploads/sites/10/2026/05/OpenAILawyerGetty.jpg",
"width": 2000,
"height": 1333
}
},
"fetchFailed": false,
"isLoading": false
},
"news_12083429": {
"type": "attachments",
"id": "news_12083429",
"meta": {
"index": "attachments_1716263798",
"site": "news",
"id": "12083429",
"found": true
},
"title": "Fanlock_web img",
"publishDate": 1778637003,
"status": "inherit",
"parent": 12083428,
"modified": 1778637319,
"caption": "A woman's face, distorted by digital glitching and overlaid with streams of binary code, reflects the loss of control over one's own image in the age of AI.",
"credit": "Composite by Morgan Sung; Image by Curly_photo/Getty Images",
"altTag": "A blurred, glitchy portrait of a woman's face overlaid with streams of binary code in teal and blue tones, evoking the intersection of identity and digital technology. The text \"Close All Tabs\" appears in the bottom right corner in a pixelated font.",
"description": null,
"imgSizes": {
"thumbnail": {
"file": "https://cdn.kqed.org/wp-content/uploads/sites/10/2026/05/Fanlock_web-img-160x90.png",
"width": 160,
"height": 90,
"mimeType": "image/png"
},
"1536x1536": {
"file": "https://cdn.kqed.org/wp-content/uploads/sites/10/2026/05/Fanlock_web-img-1536x864.png",
"width": 1536,
"height": 864,
"mimeType": "image/png"
},
"post-thumbnail": {
"file": "https://cdn.kqed.org/wp-content/uploads/sites/10/2026/05/Fanlock_web-img-672x372.png",
"width": 672,
"height": 372,
"mimeType": "image/png"
},
"twentyfourteen-full-width": {
"file": "https://cdn.kqed.org/wp-content/uploads/sites/10/2026/05/Fanlock_web-img-1038x576.png",
"width": 1038,
"height": 576,
"mimeType": "image/png"
},
"npr-cds-wide": {
"file": "https://cdn.kqed.org/wp-content/uploads/sites/10/2026/05/Fanlock_web-img-1200x675.png",
"width": 1200,
"height": 675,
"mimeType": "image/png"
},
"npr-cds-square": {
"file": "https://cdn.kqed.org/wp-content/uploads/sites/10/2026/05/Fanlock_web-img-600x600.png",
"width": 600,
"height": 600,
"mimeType": "image/png"
},
"kqedFullSize": {
"file": "https://cdn.kqed.org/wp-content/uploads/sites/10/2026/05/Fanlock_web-img.png",
"width": 1920,
"height": 1080
}
},
"fetchFailed": false,
"isLoading": false
},
"news_12083392": {
"type": "attachments",
"id": "news_12083392",
"meta": {
"index": "attachments_1716263798",
"site": "news",
"id": "12083392",
"found": true
},
"title": "260512-MUSK-ALTMAN-TRIAL-VB-03-KQED",
"publishDate": 1778625867,
"status": "inherit",
"parent": 0,
"modified": 1778625974,
"caption": "Open AI CEO Sam Altman testifies as a video of him is played on a screen in the trial in which Elon Musk claims that Altman and OpenAI abandoned their founding promise to develop AI for the benefit of humanity rather than solely for profit in Oakland on May 12, 2026.",
"credit": "Vicki Behringer for KQED",
"altTag": null,
"description": null,
"imgSizes": {
"thumbnail": {
"file": "https://cdn.kqed.org/wp-content/uploads/sites/10/2026/05/260512-MUSK-ALTMAN-TRIAL-VB-03-KQED-160x90.jpg",
"width": 160,
"height": 90,
"mimeType": "image/jpeg"
},
"1536x1536": {
"file": "https://cdn.kqed.org/wp-content/uploads/sites/10/2026/05/260512-MUSK-ALTMAN-TRIAL-VB-03-KQED-1536x864.jpg",
"width": 1536,
"height": 864,
"mimeType": "image/jpeg"
},
"post-thumbnail": {
"file": "https://cdn.kqed.org/wp-content/uploads/sites/10/2026/05/260512-MUSK-ALTMAN-TRIAL-VB-03-KQED-672x372.jpg",
"width": 672,
"height": 372,
"mimeType": "image/jpeg"
},
"twentyfourteen-full-width": {
"file": "https://cdn.kqed.org/wp-content/uploads/sites/10/2026/05/260512-MUSK-ALTMAN-TRIAL-VB-03-KQED-1038x576.jpg",
"width": 1038,
"height": 576,
"mimeType": "image/jpeg"
},
"npr-cds-wide": {
"file": "https://cdn.kqed.org/wp-content/uploads/sites/10/2026/05/260512-MUSK-ALTMAN-TRIAL-VB-03-KQED-1200x675.jpg",
"width": 1200,
"height": 675,
"mimeType": "image/jpeg"
},
"npr-cds-square": {
"file": "https://cdn.kqed.org/wp-content/uploads/sites/10/2026/05/260512-MUSK-ALTMAN-TRIAL-VB-03-KQED-600x600.jpg",
"width": 600,
"height": 600,
"mimeType": "image/jpeg"
},
"kqedFullSize": {
"file": "https://cdn.kqed.org/wp-content/uploads/sites/10/2026/05/260512-MUSK-ALTMAN-TRIAL-VB-03-KQED.jpg",
"width": 2000,
"height": 1125
}
},
"fetchFailed": false,
"isLoading": false
},
"news_12083235": {
"type": "attachments",
"id": "news_12083235",
"meta": {
"index": "attachments_1716263798",
"site": "news",
"id": "12083235",
"found": true
},
"title": "Elon Musk v. OpenAI Trial Continues In California",
"publishDate": 1778545779,
"status": "inherit",
"parent": 12083224,
"modified": 1778546322,
"caption": "OpenAI CEO Sam Altman arrives at the Ronald V. Dellums Federal Building on April 30, 2026, in Oakland, California. Elon Musk invested in OpenAI early on, believing it would be a nonprofit, but is now suing OpenAI and its CEO, Sam Altman, for allegedly deceiving him by developing OpenAI into a for-profit company. ",
"credit": "Benjamin Fanjoy/Getty Images",
"altTag": null,
"description": null,
"imgSizes": {
"thumbnail": {
"file": "https://cdn.kqed.org/wp-content/uploads/sites/10/2026/05/SamAltmanGetty-160x107.jpg",
"width": 160,
"height": 107,
"mimeType": "image/jpeg"
},
"1536x1536": {
"file": "https://cdn.kqed.org/wp-content/uploads/sites/10/2026/05/SamAltmanGetty-1536x1024.jpg",
"width": 1536,
"height": 1024,
"mimeType": "image/jpeg"
},
"post-thumbnail": {
"file": "https://cdn.kqed.org/wp-content/uploads/sites/10/2026/05/SamAltmanGetty-672x372.jpg",
"width": 672,
"height": 372,
"mimeType": "image/jpeg"
},
"twentyfourteen-full-width": {
"file": "https://cdn.kqed.org/wp-content/uploads/sites/10/2026/05/SamAltmanGetty-1038x576.jpg",
"width": 1038,
"height": 576,
"mimeType": "image/jpeg"
},
"npr-cds-wide": {
"file": "https://cdn.kqed.org/wp-content/uploads/sites/10/2026/05/SamAltmanGetty-1200x675.jpg",
"width": 1200,
"height": 675,
"mimeType": "image/jpeg"
},
"npr-cds-square": {
"file": "https://cdn.kqed.org/wp-content/uploads/sites/10/2026/05/SamAltmanGetty-600x600.jpg",
"width": 600,
"height": 600,
"mimeType": "image/jpeg"
},
"kqedFullSize": {
"file": "https://cdn.kqed.org/wp-content/uploads/sites/10/2026/05/SamAltmanGetty.jpg",
"width": 2000,
"height": 1333
}
},
"fetchFailed": false,
"isLoading": false
},
"news_12082344": {
"type": "attachments",
"id": "news_12082344",
"meta": {
"index": "attachments_1716263798",
"site": "news",
"id": "12082344",
"found": true
},
"title": "260504-MUSK-ALTMAN-VB-04-KQED",
"publishDate": 1777936709,
"status": "inherit",
"parent": 0,
"modified": 1777936770,
"caption": "Open AI President Greg Brockman testifies in the trial in which Elon Musk claims that Sam Altman and OpenAI abandoned their founding promise to develop AI for the benefit of humanity, rather than solely for profit in Oakland on May 4, 2026.",
"credit": "Vicki Behringer for KQED",
"altTag": null,
"description": null,
"imgSizes": {
"thumbnail": {
"file": "https://cdn.kqed.org/wp-content/uploads/sites/10/2026/05/260504-MUSK-ALTMAN-VB-04-KQED-160x90.jpg",
"width": 160,
"height": 90,
"mimeType": "image/jpeg"
},
"1536x1536": {
"file": "https://cdn.kqed.org/wp-content/uploads/sites/10/2026/05/260504-MUSK-ALTMAN-VB-04-KQED-1536x864.jpg",
"width": 1536,
"height": 864,
"mimeType": "image/jpeg"
},
"post-thumbnail": {
"file": "https://cdn.kqed.org/wp-content/uploads/sites/10/2026/05/260504-MUSK-ALTMAN-VB-04-KQED-672x372.jpg",
"width": 672,
"height": 372,
"mimeType": "image/jpeg"
},
"twentyfourteen-full-width": {
"file": "https://cdn.kqed.org/wp-content/uploads/sites/10/2026/05/260504-MUSK-ALTMAN-VB-04-KQED-1038x576.jpg",
"width": 1038,
"height": 576,
"mimeType": "image/jpeg"
},
"npr-cds-wide": {
"file": "https://cdn.kqed.org/wp-content/uploads/sites/10/2026/05/260504-MUSK-ALTMAN-VB-04-KQED-1200x675.jpg",
"width": 1200,
"height": 675,
"mimeType": "image/jpeg"
},
"npr-cds-square": {
"file": "https://cdn.kqed.org/wp-content/uploads/sites/10/2026/05/260504-MUSK-ALTMAN-VB-04-KQED-600x600.jpg",
"width": 600,
"height": 600,
"mimeType": "image/jpeg"
},
"kqedFullSize": {
"file": "https://cdn.kqed.org/wp-content/uploads/sites/10/2026/05/260504-MUSK-ALTMAN-VB-04-KQED.jpg",
"width": 2000,
"height": 1125
}
},
"fetchFailed": false,
"isLoading": false
},
"news_12082068": {
"type": "attachments",
"id": "news_12082068",
"meta": {
"index": "attachments_1716263798",
"site": "news",
"id": "12082068",
"found": true
},
"title": "CANADA-SHOOTING-CRIME",
"publishDate": 1777656025,
"status": "inherit",
"parent": 12082064,
"modified": 1777677641,
"caption": "A young boy brings flowers to a memorial in honor of the victims of one of Canada's deadliest shootings in Tumbler Ridge, British Columbia, Canada, on Feb. 13, 2026. An 18-year-old carried out a mass shooting in a remote mining town, killing six people at a local school, after slaying her mother and stepbrother. Canadian Police Commander Dwayne McDonald said authorities still don't know the motive in the Feb. 10 mass shooting, but the shooter, who took her own life, was known to have mental health issues. ",
"credit": "Paige Taylor White/AFP via Getty Images",
"altTag": null,
"description": null,
"imgSizes": {
"thumbnail": {
"file": "https://cdn.kqed.org/wp-content/uploads/sites/10/2026/05/TumblerRidgeGetty-160x110.jpg",
"width": 160,
"height": 110,
"mimeType": "image/jpeg"
},
"1536x1536": {
"file": "https://cdn.kqed.org/wp-content/uploads/sites/10/2026/05/TumblerRidgeGetty-1536x1053.jpg",
"width": 1536,
"height": 1053,
"mimeType": "image/jpeg"
},
"post-thumbnail": {
"file": "https://cdn.kqed.org/wp-content/uploads/sites/10/2026/05/TumblerRidgeGetty-672x372.jpg",
"width": 672,
"height": 372,
"mimeType": "image/jpeg"
},
"twentyfourteen-full-width": {
"file": "https://cdn.kqed.org/wp-content/uploads/sites/10/2026/05/TumblerRidgeGetty-1038x576.jpg",
"width": 1038,
"height": 576,
"mimeType": "image/jpeg"
},
"npr-cds-wide": {
"file": "https://cdn.kqed.org/wp-content/uploads/sites/10/2026/05/TumblerRidgeGetty-1200x675.jpg",
"width": 1200,
"height": 675,
"mimeType": "image/jpeg"
},
"npr-cds-square": {
"file": "https://cdn.kqed.org/wp-content/uploads/sites/10/2026/05/TumblerRidgeGetty-600x600.jpg",
"width": 600,
"height": 600,
"mimeType": "image/jpeg"
},
"kqedFullSize": {
"file": "https://cdn.kqed.org/wp-content/uploads/sites/10/2026/05/TumblerRidgeGetty.jpg",
"width": 2000,
"height": 1371
}
},
"fetchFailed": false,
"isLoading": false
},
"news_12081681": {
"type": "attachments",
"id": "news_12081681",
"meta": {
"index": "attachments_1716263798",
"site": "news",
"id": "12081681",
"found": true
},
"title": "260428-MUSK-ALTMAN-VB-03-KQED-1",
"publishDate": 1777416108,
"status": "inherit",
"parent": 12081603,
"modified": 1777508469,
"caption": "Representing Microsoft, Russell Coan (left) speaks as Elon Musk watches in the trial in which Elon Musk claims that Sam Altman and OpenAI abandoned their founding promise to develop AI for the benefit of humanity, rather than solely for profit, in Oakland on April 28, 2026.",
"credit": "Vicki Behringer for KQED",
"altTag": null,
"description": null,
"imgSizes": {
"thumbnail": {
"file": "https://cdn.kqed.org/wp-content/uploads/sites/10/2026/04/260428-MUSK-ALTMAN-VB-03-KQED-1-160x90.jpg",
"width": 160,
"height": 90,
"mimeType": "image/jpeg"
},
"1536x1536": {
"file": "https://cdn.kqed.org/wp-content/uploads/sites/10/2026/04/260428-MUSK-ALTMAN-VB-03-KQED-1-1536x864.jpg",
"width": 1536,
"height": 864,
"mimeType": "image/jpeg"
},
"post-thumbnail": {
"file": "https://cdn.kqed.org/wp-content/uploads/sites/10/2026/04/260428-MUSK-ALTMAN-VB-03-KQED-1-672x372.jpg",
"width": 672,
"height": 372,
"mimeType": "image/jpeg"
},
"twentyfourteen-full-width": {
"file": "https://cdn.kqed.org/wp-content/uploads/sites/10/2026/04/260428-MUSK-ALTMAN-VB-03-KQED-1-1038x576.jpg",
"width": 1038,
"height": 576,
"mimeType": "image/jpeg"
},
"npr-cds-wide": {
"file": "https://cdn.kqed.org/wp-content/uploads/sites/10/2026/04/260428-MUSK-ALTMAN-VB-03-KQED-1-1200x675.jpg",
"width": 1200,
"height": 675,
"mimeType": "image/jpeg"
},
"npr-cds-square": {
"file": "https://cdn.kqed.org/wp-content/uploads/sites/10/2026/04/260428-MUSK-ALTMAN-VB-03-KQED-1-600x600.jpg",
"width": 600,
"height": 600,
"mimeType": "image/jpeg"
},
"kqedFullSize": {
"file": "https://cdn.kqed.org/wp-content/uploads/sites/10/2026/04/260428-MUSK-ALTMAN-VB-03-KQED-1.jpg",
"width": 2000,
"height": 1125
}
},
"fetchFailed": false,
"isLoading": false
},
"news_12081639": {
"type": "attachments",
"id": "news_12081639",
"meta": {
"index": "attachments_1716263798",
"site": "news",
"id": "12081639",
"found": true
},
"title": "260428-MUSK ALTMAN-VB-02-KQED",
"publishDate": 1777410140,
"status": "inherit",
"parent": 12081603,
"modified": 1777422271,
"caption": "Elon Musk (left) takes the stand in the trial in which Elon Musk claims that Sam Altman and OpenAI abandoned their founding promise to develop AI for the benefit of humanity, rather than solely for profit, in Oakland on April 28, 2026.",
"credit": "Vicki Behringer for KQED",
"altTag": null,
"description": null,
"imgSizes": {
"thumbnail": {
"file": "https://cdn.kqed.org/wp-content/uploads/sites/10/2026/04/260428-MUSK-ALTMAN-VB-02-KQED-160x90.jpg",
"width": 160,
"height": 90,
"mimeType": "image/jpeg"
},
"1536x1536": {
"file": "https://cdn.kqed.org/wp-content/uploads/sites/10/2026/04/260428-MUSK-ALTMAN-VB-02-KQED-1536x864.jpg",
"width": 1536,
"height": 864,
"mimeType": "image/jpeg"
},
"post-thumbnail": {
"file": "https://cdn.kqed.org/wp-content/uploads/sites/10/2026/04/260428-MUSK-ALTMAN-VB-02-KQED-672x372.jpg",
"width": 672,
"height": 372,
"mimeType": "image/jpeg"
},
"twentyfourteen-full-width": {
"file": "https://cdn.kqed.org/wp-content/uploads/sites/10/2026/04/260428-MUSK-ALTMAN-VB-02-KQED-1038x576.jpg",
"width": 1038,
"height": 576,
"mimeType": "image/jpeg"
},
"npr-cds-wide": {
"file": "https://cdn.kqed.org/wp-content/uploads/sites/10/2026/04/260428-MUSK-ALTMAN-VB-02-KQED-1200x675.jpg",
"width": 1200,
"height": 675,
"mimeType": "image/jpeg"
},
"npr-cds-square": {
"file": "https://cdn.kqed.org/wp-content/uploads/sites/10/2026/04/260428-MUSK-ALTMAN-VB-02-KQED-600x600.jpg",
"width": 600,
"height": 600,
"mimeType": "image/jpeg"
},
"kqedFullSize": {
"file": "https://cdn.kqed.org/wp-content/uploads/sites/10/2026/04/260428-MUSK-ALTMAN-VB-02-KQED.jpg",
"width": 2000,
"height": 1125
}
},
"fetchFailed": false,
"isLoading": false
},
"news_12080929": {
"type": "attachments",
"id": "news_12080929",
"meta": {
"index": "attachments_1716263798",
"site": "news",
"id": "12080929",
"found": true
},
"title": "260422-ALTMANMUSK-MD-01-KQED",
"publishDate": 1776885164,
"status": "inherit",
"parent": 0,
"modified": 1776885551,
"caption": "Once allies in what they called a mission to develop AI safely for humanity, Elon Musk and Sam Altman will let a federal judge and jury decide what that promise was worth. The trial is slated to begin April 27, 2026.",
"credit": "Left: Chip Somodevilla/Getty Images; Right: Fabrice Coffrini/AFP via Getty Images",
"altTag": null,
"description": null,
"imgSizes": {
"thumbnail": {
"file": "https://cdn.kqed.org/wp-content/uploads/sites/10/2026/04/260422-ALTMANMUSK-MD-01-KQED-160x107.jpg",
"width": 160,
"height": 107,
"mimeType": "image/jpeg"
},
"1536x1536": {
"file": "https://cdn.kqed.org/wp-content/uploads/sites/10/2026/04/260422-ALTMANMUSK-MD-01-KQED-1536x1025.jpg",
"width": 1536,
"height": 1025,
"mimeType": "image/jpeg"
},
"post-thumbnail": {
"file": "https://cdn.kqed.org/wp-content/uploads/sites/10/2026/04/260422-ALTMANMUSK-MD-01-KQED-672x372.jpg",
"width": 672,
"height": 372,
"mimeType": "image/jpeg"
},
"twentyfourteen-full-width": {
"file": "https://cdn.kqed.org/wp-content/uploads/sites/10/2026/04/260422-ALTMANMUSK-MD-01-KQED-1038x576.jpg",
"width": 1038,
"height": 576,
"mimeType": "image/jpeg"
},
"npr-cds-wide": {
"file": "https://cdn.kqed.org/wp-content/uploads/sites/10/2026/04/260422-ALTMANMUSK-MD-01-KQED-1200x675.jpg",
"width": 1200,
"height": 675,
"mimeType": "image/jpeg"
},
"npr-cds-square": {
"file": "https://cdn.kqed.org/wp-content/uploads/sites/10/2026/04/260422-ALTMANMUSK-MD-01-KQED-600x600.jpg",
"width": 600,
"height": 600,
"mimeType": "image/jpeg"
},
"kqedFullSize": {
"file": "https://cdn.kqed.org/wp-content/uploads/sites/10/2026/04/260422-ALTMANMUSK-MD-01-KQED.jpg",
"width": 2000,
"height": 1334
}
},
"fetchFailed": false,
"isLoading": false
}
},
"audioPlayerReducer": {
"postId": "stream_live",
"isPaused": true,
"isPlaying": false,
"pfsActive": false,
"pledgeModalIsOpen": true,
"playerDrawerIsOpen": false
},
"authorsReducer": {
"byline_news_12082064": {
"type": "authors",
"id": "byline_news_12082064",
"meta": {
"override": true
},
"slug": "byline_news_12082064",
"name": "Matt O’Brien, Associated Press, and Nisa Khan, KQED",
"isLoading": false
},
"rachael-myrow": {
"type": "authors",
"id": "251",
"meta": {
"index": "authors_1716337520",
"id": "251",
"found": true
},
"name": "Rachael Myrow",
"firstName": "Rachael",
"lastName": "Myrow",
"slug": "rachael-myrow",
"email": "rmyrow@kqed.org",
"display_author_email": true,
"staff_mastheads": [
"news"
],
"title": "Senior Editor of KQED's Silicon Valley News Desk",
"bio": "• I write and edit stories about how Silicon Valley power and policies shape everyday life in California. I’m also passionate about making Bay Area history and culture more accessible to a broad public. • I’ve been a journalist for most of my life, starting in high school with The Franklin Press in Los Angeles, where I grew up. While earning my first degree in English at UC Berkeley, I got my start in public radio at KALX-FM. After completing a second degree in journalism at Cal, I landed my first professional job at Marketplace, then moved on to KPCC (now LAist), and then KQED, where I hosted The California Report for more than seven years. • My reporting has appeared on NPR, The World, WBUR’s \u003ci>Here & Now\u003c/i>, and the BBC. I also guest host for KQED’s \u003ci>Forum\u003c/i>, as well as the Commonwealth Club in San Francisco. • I speak periodically on media, democracy and technology issues, and do voiceover work for documentaries and educational video projects. • Outside of the studio, you'll find me hiking Bay Area trails and whipping up Insta-ready meals in my kitchen. • I do not accept gifts, money, or favors from anyone connected to my reporting, I don't pay people for information, and I do not support or donate to political causes. • I strive to treat the people I report on with fairness, honesty, and respect. I also recognize there are often multiple sides to a story and work to verify information through multiple sources and documentation. If I get something wrong, I correct it.",
"avatar": "https://secure.gravatar.com/avatar/87bf8cb5874e045cdff430523a6d48b1?s=600&d=blank&r=g",
"twitter": "rachaelmyrow",
"facebook": null,
"instagram": null,
"linkedin": "https://www.linkedin.com/in/rachaelmyrow/",
"sites": [
{
"site": "arts",
"roles": [
"administrator"
]
},
{
"site": "news",
"roles": [
"edit_others_posts",
"editor"
]
},
{
"site": "futureofyou",
"roles": [
"editor"
]
},
{
"site": "bayareabites",
"roles": [
"editor"
]
},
{
"site": "stateofhealth",
"roles": [
"editor"
]
},
{
"site": "science",
"roles": [
"editor"
]
},
{
"site": "food",
"roles": [
"editor"
]
},
{
"site": "forum",
"roles": [
"editor"
]
},
{
"site": "liveblog",
"roles": [
"author"
]
}
],
"headData": {
"title": "Rachael Myrow | KQED",
"description": "Senior Editor of KQED's Silicon Valley News Desk",
"ogImgSrc": "https://secure.gravatar.com/avatar/87bf8cb5874e045cdff430523a6d48b1?s=600&d=blank&r=g",
"twImgSrc": "https://secure.gravatar.com/avatar/87bf8cb5874e045cdff430523a6d48b1?s=600&d=blank&r=g"
},
"isLoading": false,
"link": "/author/rachael-myrow"
},
"ecruzguevarra": {
"type": "authors",
"id": "8654",
"meta": {
"index": "authors_1716337520",
"id": "8654",
"found": true
},
"name": "Ericka Cruz Guevarra",
"firstName": "Ericka",
"lastName": "Cruz Guevarra",
"slug": "ecruzguevarra",
"email": "ecruzguevarra@kqed.org",
"display_author_email": true,
"staff_mastheads": [
"news"
],
"title": "Producer, The Bay Podcast",
"bio": "Ericka Cruz Guevarra is host of \u003ca href=\"https://www.kqed.org/podcasts/thebay\">\u003cem>The Bay\u003c/em>\u003c/a> podcast at KQED. Before host, she was the show’s producer. Her work in that capacity includes a three-part reported series on policing in Vallejo, which won a 2020 excellence in journalism award from the Society of Professional Journalists. Ericka has worked as a breaking news reporter at Oregon Public Broadcasting, helped produce the Code Switch podcast, and was KQED’s inaugural Raul Ramirez Diversity Fund intern. She’s also an alumna of NPR’s Next Generation Radio program. Send her an email if you have strong feelings about whether Fairfield and Suisun City are the Bay. Ericka is represented by SAG-AFTRA.",
"avatar": "https://secure.gravatar.com/avatar/25e5ab8d3d53fad2dcc7bb2b5c506b1a?s=600&d=blank&r=g",
"twitter": "NotoriousECG",
"facebook": null,
"instagram": null,
"linkedin": null,
"sites": [
{
"site": "arts",
"roles": [
"subscriber"
]
},
{
"site": "news",
"roles": [
"editor",
"manage_categories"
]
},
{
"site": "futureofyou",
"roles": [
"subscriber"
]
},
{
"site": "stateofhealth",
"roles": [
"subscriber"
]
},
{
"site": "science",
"roles": [
"editor"
]
},
{
"site": "forum",
"roles": [
"subscriber"
]
}
],
"headData": {
"title": "Ericka Cruz Guevarra | KQED",
"description": "Producer, The Bay Podcast",
"ogImgSrc": "https://secure.gravatar.com/avatar/25e5ab8d3d53fad2dcc7bb2b5c506b1a?s=600&d=blank&r=g",
"twImgSrc": "https://secure.gravatar.com/avatar/25e5ab8d3d53fad2dcc7bb2b5c506b1a?s=600&d=blank&r=g"
},
"isLoading": false,
"link": "/author/ecruzguevarra"
},
"amontecillo": {
"type": "authors",
"id": "11649",
"meta": {
"index": "authors_1716337520",
"id": "11649",
"found": true
},
"name": "Alan Montecillo",
"firstName": "Alan",
"lastName": "Montecillo",
"slug": "amontecillo",
"email": "amontecillo@kqed.org",
"display_author_email": false,
"staff_mastheads": [
"news"
],
"title": "KQED Contributor",
"bio": "Alan Montecillo is the senior editor of \u003cem>\u003ca href=\"http://kqed.org/thebay\">The Bay\u003c/a>, \u003c/em> KQED's local news podcast. Before moving to the Bay Area, he worked as a senior talk show producer for WILL in Champaign-Urbana, Illinois and at Oregon Public Broadcasting in Portland, Oregon. He has won journalism awards from the Society of Professional Journalists Northern California, the Public Media Journalists Association, The Signal Awards, and has also received a regional Edward R. Murrow award. Alan is a Filipino American from Hong Kong and a graduate of Reed College.",
"avatar": "https://secure.gravatar.com/avatar/d5e4e7a76481969ccba76f4e2b5ccabc?s=600&d=blank&r=g",
"twitter": "alanmontecillo",
"facebook": null,
"instagram": null,
"linkedin": null,
"sites": [
{
"site": "",
"roles": [
"editor"
]
},
{
"site": "news",
"roles": [
"editor",
"manage_categories"
]
}
],
"headData": {
"title": "Alan Montecillo | KQED",
"description": "KQED Contributor",
"ogImgSrc": "https://secure.gravatar.com/avatar/d5e4e7a76481969ccba76f4e2b5ccabc?s=600&d=blank&r=g",
"twImgSrc": "https://secure.gravatar.com/avatar/d5e4e7a76481969ccba76f4e2b5ccabc?s=600&d=blank&r=g"
},
"isLoading": false,
"link": "/author/amontecillo"
},
"jessicakariisa": {
"type": "authors",
"id": "11831",
"meta": {
"index": "authors_1716337520",
"id": "11831",
"found": true
},
"name": "Jessica Kariisa",
"firstName": "Jessica",
"lastName": "Kariisa",
"slug": "jessicakariisa",
"email": "jkariisa@kqed.org",
"display_author_email": false,
"staff_mastheads": [
"news"
],
"title": "Producer, The Bay",
"bio": "Jessica Kariisa is the producer of The Bay. She first joined KQED as an intern for The California Report Magazine, after which she became an on-call producer. She reported a Bay Curious episode on the use of rap lyrics in criminal trials which won a Society of Professional Journalists award in 2023 for Excellence in Features Journalism and the 2023 Signal Award for Best Conversation Starter. She’s worked on podcasts for Snap Judgment and American Public Media. Before embarking on her audio career, she was a music journalist.\r\n\r\nJessica Kariisa is represented by SAG-AFTRA.",
"avatar": "https://secure.gravatar.com/avatar/4afd355fd24f5515aeab77fd6c72b671?s=600&d=blank&r=g",
"twitter": null,
"facebook": null,
"instagram": null,
"linkedin": null,
"sites": [
{
"site": "arts",
"roles": [
"author"
]
},
{
"site": "news",
"roles": [
"editor",
"manage_categories"
]
}
],
"headData": {
"title": "Jessica Kariisa | KQED",
"description": "Producer, The Bay",
"ogImgSrc": "https://secure.gravatar.com/avatar/4afd355fd24f5515aeab77fd6c72b671?s=600&d=blank&r=g",
"twImgSrc": "https://secure.gravatar.com/avatar/4afd355fd24f5515aeab77fd6c72b671?s=600&d=blank&r=g"
},
"isLoading": false,
"link": "/author/jessicakariisa"
},
"chambrick": {
"type": "authors",
"id": "11832",
"meta": {
"index": "authors_1716337520",
"id": "11832",
"found": true
},
"name": "Chris Hambrick",
"firstName": "Chris",
"lastName": "Hambrick",
"slug": "chambrick",
"email": "chambrick@kqed.org",
"display_author_email": false,
"staff_mastheads": [],
"title": "KQED Contributor",
"bio": null,
"avatar": "https://secure.gravatar.com/avatar/c4a3663ebbd3a21fa35ef06a1236ce8a?s=600&d=blank&r=g",
"twitter": null,
"facebook": null,
"instagram": null,
"linkedin": null,
"sites": [
{
"site": "",
"roles": [
"editor"
]
},
{
"site": "arts",
"roles": [
"editor"
]
},
{
"site": "news",
"roles": [
"editor"
]
},
{
"site": "podcasts",
"roles": [
"editor"
]
}
],
"headData": {
"title": "Chris Hambrick | KQED",
"description": "KQED Contributor",
"ogImgSrc": "https://secure.gravatar.com/avatar/c4a3663ebbd3a21fa35ef06a1236ce8a?s=600&d=blank&r=g",
"twImgSrc": "https://secure.gravatar.com/avatar/c4a3663ebbd3a21fa35ef06a1236ce8a?s=600&d=blank&r=g"
},
"isLoading": false,
"link": "/author/chambrick"
},
"cegusa": {
"type": "authors",
"id": "11869",
"meta": {
"index": "authors_1716337520",
"id": "11869",
"found": true
},
"name": "Chris Egusa",
"firstName": "Chris",
"lastName": "Egusa",
"slug": "cegusa",
"email": "cegusa@kqed.org",
"display_author_email": false,
"staff_mastheads": [],
"title": "KQED Contributor",
"bio": null,
"avatar": "https://secure.gravatar.com/avatar/86d00b34cb7eeb5247e991f0e20c70c4?s=600&d=blank&r=g",
"twitter": null,
"facebook": null,
"instagram": null,
"linkedin": null,
"sites": [
{
"site": "arts",
"roles": [
"editor"
]
}
],
"headData": {
"title": "Chris Egusa | KQED",
"description": "KQED Contributor",
"ogImgSrc": "https://secure.gravatar.com/avatar/86d00b34cb7eeb5247e991f0e20c70c4?s=600&d=blank&r=g",
"twImgSrc": "https://secure.gravatar.com/avatar/86d00b34cb7eeb5247e991f0e20c70c4?s=600&d=blank&r=g"
},
"isLoading": false,
"link": "/author/cegusa"
},
"kdebenedetti": {
"type": "authors",
"id": "11913",
"meta": {
"index": "authors_1716337520",
"id": "11913",
"found": true
},
"name": "Katie DeBenedetti",
"firstName": "Katie",
"lastName": "DeBenedetti",
"slug": "kdebenedetti",
"email": "kdebenedetti@kqed.org",
"display_author_email": false,
"staff_mastheads": [
"news",
"science"
],
"title": "KQED Contributor",
"bio": "Katie DeBenedetti is a digital reporter covering daily news for the Express Desk. Prior to joining KQED as a culture reporting intern in January 2024, she covered education and city government for the Napa Valley Register.",
"avatar": "https://secure.gravatar.com/avatar/6e31073cb8f7e4214ab03f42771d0f45?s=600&d=blank&r=g",
"twitter": null,
"facebook": null,
"instagram": null,
"linkedin": null,
"sites": [
{
"site": "news",
"roles": [
"author"
]
},
{
"site": "science",
"roles": [
"author"
]
},
{
"site": "liveblog",
"roles": [
"author"
]
}
],
"headData": {
"title": "Katie DeBenedetti | KQED",
"description": "KQED Contributor",
"ogImgSrc": "https://secure.gravatar.com/avatar/6e31073cb8f7e4214ab03f42771d0f45?s=600&d=blank&r=g",
"twImgSrc": "https://secure.gravatar.com/avatar/6e31073cb8f7e4214ab03f42771d0f45?s=600&d=blank&r=g"
},
"isLoading": false,
"link": "/author/kdebenedetti"
},
"mcueva": {
"type": "authors",
"id": "11943",
"meta": {
"index": "authors_1716337520",
"id": "11943",
"found": true
},
"name": "Maya Cueva",
"firstName": "Maya",
"lastName": "Cueva",
"slug": "mcueva",
"email": "mcueva@kqed.org",
"display_author_email": false,
"staff_mastheads": [],
"title": "KQED Contributor",
"bio": null,
"avatar": "https://secure.gravatar.com/avatar/26d0967153608e4720f52779f754087a?s=600&d=blank&r=g",
"twitter": null,
"facebook": null,
"instagram": null,
"linkedin": null,
"sites": [
{
"site": "news",
"roles": [
"editor"
]
}
],
"headData": {
"title": "Maya Cueva | KQED",
"description": "KQED Contributor",
"ogImgSrc": "https://secure.gravatar.com/avatar/26d0967153608e4720f52779f754087a?s=600&d=blank&r=g",
"twImgSrc": "https://secure.gravatar.com/avatar/26d0967153608e4720f52779f754087a?s=600&d=blank&r=g"
},
"isLoading": false,
"link": "/author/mcueva"
},
"msung": {
"type": "authors",
"id": "11944",
"meta": {
"index": "authors_1716337520",
"id": "11944",
"found": true
},
"name": "Morgan Sung",
"firstName": "Morgan",
"lastName": "Sung",
"slug": "msung",
"email": "msung@kqed.org",
"display_author_email": false,
"staff_mastheads": [],
"title": "Close All Tabs Host",
"bio": null,
"avatar": "https://secure.gravatar.com/avatar/34033b8d232ee6c987ca6f0a1a28f0e5?s=600&d=blank&r=g",
"twitter": null,
"facebook": null,
"instagram": null,
"linkedin": null,
"sites": [
{
"site": "news",
"roles": [
"editor"
]
}
],
"headData": {
"title": "Morgan Sung | KQED",
"description": "Close All Tabs Host",
"ogImgSrc": "https://secure.gravatar.com/avatar/34033b8d232ee6c987ca6f0a1a28f0e5?s=600&d=blank&r=g",
"twImgSrc": "https://secure.gravatar.com/avatar/34033b8d232ee6c987ca6f0a1a28f0e5?s=600&d=blank&r=g"
},
"isLoading": false,
"link": "/author/msung"
}
},
"breakingNewsReducer": {},
"pagesReducer": {},
"postsReducer": {
"stream_live": {
"type": "live",
"id": "stream_live",
"audioUrl": "https://streams.kqed.org/kqedradio",
"title": "Live Stream",
"excerpt": "Live Stream information currently unavailable.",
"link": "/radio",
"featImg": "",
"label": {
"name": "KQED Live",
"link": "/"
}
},
"stream_kqedNewscast": {
"type": "posts",
"id": "stream_kqedNewscast",
"audioUrl": "https://www.kqed.org/.stream/anon/radio/RDnews/newscast.mp3?_=1",
"title": "KQED Newscast",
"featImg": "",
"label": {
"name": "88.5 FM",
"link": "/"
}
},
"news_12083612": {
"type": "posts",
"id": "news_12083612",
"meta": {
"index": "posts_1716263798",
"site": "news",
"id": "12083612",
"score": null,
"sort": [
1778805082000
]
},
"guestAuthors": [],
"slug": "lawyers-for-elon-musk-and-sam-altman-make-their-final-case-in-openai-trial",
"title": "Lawyers for Elon Musk and Sam Altman Make Their Final Case in OpenAI Trial",
"publishDate": 1778805082,
"format": "standard",
"headTitle": "Lawyers for Elon Musk and Sam Altman Make Their Final Case in OpenAI Trial | KQED",
"labelTerm": {
"site": "news"
},
"content": "\u003cp>Whether \u003ca href=\"https://www.kqed.org/news/tag/openai\">OpenAI\u003c/a> CEO \u003ca href=\"https://www.kqed.org/news/tag/sam-altman\">Sam Altman\u003c/a> and other executives betrayed their commitment to building a safe, open-source artificial intelligence, slighting billionaire Elon Musk in the process, will be decided by an Oakland jury and judge.\u003c/p>\n\u003cp>For weeks, the tech executives have sparred in federal court over whether the startup, first proposed by Altman to Musk as a sort of AI “Manhattan Project,” has abandoned its original mission to enrich itself. Musk, who provided $38 million in early funding, has accused his former OpenAI co-founders Altman and Greg Brockman of “stealing a charity.”\u003c/p>\n\u003cp>OpenAI’s executives, on the other hand, have said Musk only sued after he brought his own AI competitor, xAI, onto the market.\u003c/p>\n\u003cp>[ad fullwidth]\u003c/p>\n\u003cp>During his closing statement, Musk’s lead counsel, Steven Molo, focused on Altman’s credibility. He asked the jury to consider hypothetically what they would do if they came upon a bridge, suspended 150 feet above a river, and built on Altman’s “version of the truth.”\u003c/p>\n\u003cp>“Would you walk across that bridge?” He asked. 
“I don’t think many people would.”\u003c/p>\n\u003cp>Molo said that in the early years of OpenAI, the intent was to create a technology “for the good of the world.” He pointed to Musk’s early fears of the dangers of artificial general intelligence, or AGI, an early mission statement that said OpenAI would not be constrained by a need to generate financial return and correspondence between Altman and Musk that expressed support by both of them for a nonprofit structure and safety-focused mission.\u003c/p>\n\u003cfigure id=\"attachment_12081686\" class=\"wp-caption aligncenter\" style=\"max-width: 2000px\">\u003cimg loading=\"lazy\" decoding=\"async\" class=\"size-full wp-image-12081686\" src=\"https://cdn.kqed.org/wp-content/uploads/sites/10/2026/04/260428-MUSK-ALTMAN-VB-04-KQED.jpg\" alt=\"\" width=\"2000\" height=\"1125\" srcset=\"https://cdn.kqed.org/wp-content/uploads/sites/10/2026/04/260428-MUSK-ALTMAN-VB-04-KQED.jpg 2000w, https://cdn.kqed.org/wp-content/uploads/sites/10/2026/04/260428-MUSK-ALTMAN-VB-04-KQED-160x90.jpg 160w, https://cdn.kqed.org/wp-content/uploads/sites/10/2026/04/260428-MUSK-ALTMAN-VB-04-KQED-1536x864.jpg 1536w, https://cdn.kqed.org/wp-content/uploads/sites/10/2026/04/260428-MUSK-ALTMAN-VB-04-KQED-1200x675.jpg 1200w\" sizes=\"auto, (max-width: 2000px) 100vw, 2000px\">\u003cfigcaption class=\"wp-caption-text\">Steve Molo, Elon Musk’s attorney, presents opening statements in the trial in which Elon Musk (center-right) claims that Sam Altman (right) and OpenAI abandoned their founding promise to develop AI for the benefit of humanity, rather than solely for profit, in Oakland on April 28, 2026. 
\u003ccite>(Vicki Behringer for KQED)\u003c/cite>\u003c/figcaption>\u003c/figure>\n\u003cp>“There was no disagreement over the core mission,” Molo said.\u003c/p>\n\u003cp>But, he said, since OpenAI launched a for-profit subsidiary in 2019 — after Musk departed — Altman and his fellow executives have treated the nonprofit as a “shell,” transferring intellectual property and the vast majority of employees to the for-profit arm of the company. In 2023, Molo continued, after OpenAI made a $10 billion deal with Microsoft, the company failed to prioritize safety, abandoned its commitment to open sourcing and “enriched investors and insiders.”\u003c/p>\n\u003cp>“They’re motivated by money: Microsoft and Altman,” Molo said.\u003c/p>\n\u003cp>Microsoft CEO Satya Nadella testified that the company had invested $13 billion and expects to see a return of about $92 billion. Molo also pointed out that other executives, including Brockman and founding OpenAI computer scientist Ilya Sutskever, testified to having billions in equity, despite not investing in the company.[aside postID=news_12083278 hero='https://cdn.kqed.org/wp-content/uploads/sites/10/2026/05/260512-MUSK-ALTMAN-TRIAL-VB-03-KQED.jpg']Altman’s attorneys argued that Musk’s case was baseless: not only was Molo’s characterization false, but they argued, the larger issue is that Musk’s contributions to OpenAI — in the form of rent payments, Tesla Model 3 cars and $25 million in quarterly donations — were never accompanied by specific promises for their use.\u003c/p>\n\u003cp>“If the donations came with no strings attached, then Mr. 
Musk does not have a charitable trust to enforce,” Sarah Eddy, an attorney for OpenAI’s defendants, said.\u003c/p>\n\u003cp>She and Altman’s lead counsel, William Savitt, also spent much of their closing arguments painting Musk as not wanting to protect humanity from AGI, as he’s suggested, but wanting to be the one who controls it.\u003c/p>\n\u003cp>They allege Musk brought his suit after he tried to wrest control of a potential for-profit arm of OpenAI, and later absorb the organization into Tesla, in 2017. The executives had begun discussing a for-profit expansion that year to solicit more funding for top talent and “compute” to compete with other industry leaders.\u003c/p>\n\u003cp>Musk departed OpenAI in February 2018, after a falling-out with the other executives over those discussions.\u003c/p>\n\u003cfigure id=\"attachment_12075382\" class=\"wp-caption aligncenter\" style=\"max-width: 2000px\">\u003cimg loading=\"lazy\" decoding=\"async\" class=\"size-full wp-image-12075382\" src=\"https://cdn.kqed.org/wp-content/uploads/sites/10/2026/03/260304-Elon-Musk-Trial-01-KQED.jpg\" alt=\"\" width=\"2000\" height=\"1333\" srcset=\"https://cdn.kqed.org/wp-content/uploads/sites/10/2026/03/260304-Elon-Musk-Trial-01-KQED.jpg 2000w, https://cdn.kqed.org/wp-content/uploads/sites/10/2026/03/260304-Elon-Musk-Trial-01-KQED-160x107.jpg 160w, https://cdn.kqed.org/wp-content/uploads/sites/10/2026/03/260304-Elon-Musk-Trial-01-KQED-1536x1024.jpg 1536w\" sizes=\"auto, (max-width: 2000px) 100vw, 2000px\">\u003cfigcaption class=\"wp-caption-text\">A courtroom sketch of Elon Musk on the stand as he’s questioned by the plaintiff’s attorney, Aaron P. Arnzen, on March 4, 2026. Musk is accused of making false and misleading statements that drove down Twitter’s stock price before he bought the social media platform for $44 billion in 2022. 
\u003ccite>(Vicki Behringer for KQED)\u003c/cite>\u003c/figcaption>\u003c/figure>\n\u003cp>Shortly after, OpenAI decided to introduce a for-profit public benefit corporation. It has since become a $850 billion company, and is considering an initial public offering estimated at up to a trillion dollars.\u003c/p>\n\u003cp>OpenAI’s attorneys alleged that Musk saw OpenAI’s skyrocketing success and filed his suit to destroy a competitor in the field.\u003c/p>\n\u003cp>“The truth is that Mr. Musk wanted a for-profit AI, and he wanted to dominate it,” Eddy said.\u003c/p>\n\u003cp>The jury is set to begin deliberations on Monday. If they side with Musk, OpenAI and Microsoft could owe $150 billion in damages to be redirected to the nonprofit foundation, along with a court order dismantling OpenAI’s for-profit structure and removal of Altman and Brockman from their posts.\u003c/p>\n\u003cfigure id=\"attachment_12083616\" class=\"wp-caption aligncenter\" style=\"max-width: 2000px\">\u003cimg loading=\"lazy\" decoding=\"async\" class=\"size-full wp-image-12083616\" src=\"https://cdn.kqed.org/wp-content/uploads/sites/10/2026/05/Gonzalez-Rogers-Yvonne.jpg\" alt=\"\" width=\"2000\" height=\"1334\" srcset=\"https://cdn.kqed.org/wp-content/uploads/sites/10/2026/05/Gonzalez-Rogers-Yvonne.jpg 2000w, https://cdn.kqed.org/wp-content/uploads/sites/10/2026/05/Gonzalez-Rogers-Yvonne-160x107.jpg 160w, https://cdn.kqed.org/wp-content/uploads/sites/10/2026/05/Gonzalez-Rogers-Yvonne-1536x1025.jpg 1536w\" sizes=\"auto, (max-width: 2000px) 100vw, 2000px\">\u003cfigcaption class=\"wp-caption-text\">Judge Yvonne Gonzalez Rogers of the U.S. District Court for the Northern District of California. \u003ccite>(Courtesy of Daily Journal)\u003c/cite>\u003c/figcaption>\u003c/figure>\n\u003cp>The jury will not have the final say, though. In a rare, but not unprecedented, move, U.S. 
District Judge Yvonne Gonzalez Rogers will have the ultimate right to rule on the claims.\u003c/p>\n\u003cp>According to Charlie Bullock, a senior research fellow at the Institute for Law and AI, this is because most times, “equitable claims” — breach of charitable trust and unjust enrichment — which involve non-monetary remedies, are decided by a judge.\u003c/p>\n\u003cp>In this case, Gonzalez Rogers elected to have an advisory jury, and Bullock said that typically, judges choose to go along with their decision.\u003c/p>\n\u003cp>\u003c/p>\n",
"blocks": [],
"excerpt": "An Oakland judge and jury will decide whether the company behind ChatGPT betrayed its mission of developing a safer, less risky AI. ",
"status": "publish",
"parent": 0,
"modified": 1778813223,
"stats": {
"hasAudio": false,
"hasVideo": false,
"hasChartOrMap": false,
"iframeSrcs": [],
"hasGoogleForm": false,
"hasGallery": false,
"hasHearkenModule": false,
"hasPolis": false,
"paragraphCount": 23,
"wordCount": 1022
},
"headData": {
"title": "Lawyers for Elon Musk and Sam Altman Make Their Final Case in OpenAI Trial | KQED",
"description": "An Oakland judge and jury will decide whether the company behind ChatGPT betrayed its mission of developing a safer, less risky AI. ",
"ogTitle": "",
"ogDescription": "",
"ogImgId": "",
"twTitle": "",
"twDescription": "",
"twImgId": "",
"schema": {
"@context": "https://schema.org",
"@type": "NewsArticle",
"headline": "Lawyers for Elon Musk and Sam Altman Make Their Final Case in OpenAI Trial",
"datePublished": "2026-05-14T17:31:22-07:00",
"dateModified": "2026-05-14T19:47:03-07:00",
"image": "https://cdn.kqed.org/wp-content/uploads/2020/02/KQED-OG-Image@1x.png",
"isAccessibleForFree": "True",
"publisher": {
"@type": "NewsMediaOrganization",
"@id": "https://www.kqed.org/#organization",
"name": "KQED",
"logo": "https://cdn.kqed.org/wp-content/uploads/2020/02/KQED-OG-Image@1x.png",
"url": "https://www.kqed.org",
"sameAs": [
"https://www.facebook.com/KQED",
"https://twitter.com/KQED",
"https://www.instagram.com/kqed/",
"https://www.tiktok.com/@kqedofficial",
"https://www.linkedin.com/company/kqed",
"https://www.youtube.com/channel/UCeC0IOo7i1P_61zVUWbJ4nw"
]
}
}
},
"primaryCategory": {
"termId": 248,
"slug": "technology",
"name": "Technology"
},
"sticky": false,
"nprStoryId": "kqed-12083612",
"templateType": "standard",
"featuredImageType": "standard",
"excludeFromSiteSearch": "Include",
"articleAge": "0",
"path": "/news/12083612/lawyers-for-elon-musk-and-sam-altman-make-their-final-case-in-openai-trial",
"audioTrackLength": null,
"parsedContent": [
{
"type": "contentString",
"content": "\u003cdiv class=\"post-body\">\u003cp>\u003cp>Whether \u003ca href=\"https://www.kqed.org/news/tag/openai\">OpenAI\u003c/a> CEO \u003ca href=\"https://www.kqed.org/news/tag/sam-altman\">Sam Altman\u003c/a> and other executives betrayed their commitment to building a safe, open-source artificial intelligence, slighting billionaire Elon Musk in the process, will be decided by an Oakland jury and judge.\u003c/p>\n\u003cp>For weeks, the tech executives have sparred in federal court over whether the startup, first proposed by Altman to Musk as a sort of AI “Manhattan Project,” has abandoned its original mission to enrich itself. Musk, who provided $38 million in early funding, has accused his former OpenAI co-founders Altman and Greg Brockman of “stealing a charity.”\u003c/p>\n\u003cp>OpenAI’s executives, on the other hand, have said Musk only sued after he brought his own AI competitor, xAI, onto the market.\u003c/p>\n\u003cp>\u003c/p>\u003c/div>",
"attributes": {
"named": {},
"numeric": []
}
},
{
"type": "component",
"content": "",
"name": "ad",
"attributes": {
"named": {
"label": "fullwidth"
},
"numeric": [
"fullwidth"
]
}
},
{
"type": "contentString",
"content": "\u003cdiv class=\"post-body\">\u003cp>\u003c/p>\n\u003cp>During his closing statement, Musk’s lead counsel, Steven Molo, focused on Altman’s credibility. He asked the jury to consider hypothetically what they would do if they came upon a bridge, suspended 150 feet above a river, and built on Altman’s “version of the truth.”\u003c/p>\n\u003cp>“Would you walk across that bridge?” He asked. “I don’t think many people would.”\u003c/p>\n\u003cp>Molo said that in the early years of OpenAI, the intent was to create a technology “for the good of the world.” He pointed to Musk’s early fears of the dangers of artificial general intelligence, or AGI, an early mission statement that said OpenAI would not be constrained by a need to generate financial return and correspondence between Altman and Musk that expressed support by both of them for a nonprofit structure and safety-focused mission.\u003c/p>\n\u003cfigure id=\"attachment_12081686\" class=\"wp-caption aligncenter\" style=\"max-width: 2000px\">\u003cimg loading=\"lazy\" decoding=\"async\" class=\"size-full wp-image-12081686\" src=\"https://cdn.kqed.org/wp-content/uploads/sites/10/2026/04/260428-MUSK-ALTMAN-VB-04-KQED.jpg\" alt=\"\" width=\"2000\" height=\"1125\" srcset=\"https://cdn.kqed.org/wp-content/uploads/sites/10/2026/04/260428-MUSK-ALTMAN-VB-04-KQED.jpg 2000w, https://cdn.kqed.org/wp-content/uploads/sites/10/2026/04/260428-MUSK-ALTMAN-VB-04-KQED-160x90.jpg 160w, https://cdn.kqed.org/wp-content/uploads/sites/10/2026/04/260428-MUSK-ALTMAN-VB-04-KQED-1536x864.jpg 1536w, https://cdn.kqed.org/wp-content/uploads/sites/10/2026/04/260428-MUSK-ALTMAN-VB-04-KQED-1200x675.jpg 1200w\" sizes=\"auto, (max-width: 2000px) 100vw, 2000px\">\u003cfigcaption class=\"wp-caption-text\">Steve Molo, Elon Musk’s attorney, presents opening statements in the trial in which Elon Musk (center-right) claims that Sam Altman (right) and OpenAI abandoned their founding promise to develop AI for the benefit of humanity, rather than 
solely for profit, in Oakland on April 28, 2026. \u003ccite>(Vicki Behringer for KQED)\u003c/cite>\u003c/figcaption>\u003c/figure>\n\u003cp>“There was no disagreement over the core mission,” Molo said.\u003c/p>\n\u003cp>But, he said, since OpenAI launched a for-profit subsidiary in 2019 — after Musk departed — Altman and his fellow executives have treated the nonprofit as a “shell,” transferring intellectual property and the vast majority of employees to the for-profit arm of the company. In 2023, Molo continued, after OpenAI made a $10 billion deal with Microsoft, the company failed to prioritize safety, abandoned its commitment to open sourcing and “enriched investors and insiders.”\u003c/p>\n\u003cp>“They’re motivated by money: Microsoft and Altman,” Molo said.\u003c/p>\n\u003cp>Microsoft CEO Satya Nadella testified that the company had invested $13 billion and expects to see a return of about $92 billion. Molo also pointed out that other executives, including Brockman and founding OpenAI computer scientist Ilya Sutskever, testified to having billions in equity, despite not investing in the company.\u003c/p>\u003c/div>",
"attributes": {
"named": {},
"numeric": []
}
},
{
"type": "component",
"content": "",
"name": "aside",
"attributes": {
"named": {
"postid": "news_12083278",
"hero": "https://cdn.kqed.org/wp-content/uploads/sites/10/2026/05/260512-MUSK-ALTMAN-TRIAL-VB-03-KQED.jpg",
"label": ""
},
"numeric": []
}
},
{
"type": "contentString",
"content": "\u003cdiv class=\"post-body\">\u003cp>Altman’s attorneys argued that Musk’s case was baseless: not only was Molo’s characterization false, but they argued, the larger issue is that Musk’s contributions to OpenAI — in the form of rent payments, Tesla Model 3 cars and $25 million in quarterly donations — were never accompanied by specific promises for their use.\u003c/p>\n\u003cp>“If the donations came with no strings attached, then Mr. Musk does not have a charitable trust to enforce,” Sarah Eddy, an attorney for OpenAI’s defendants, said.\u003c/p>\n\u003cp>She and Altman’s lead counsel, William Savitt, also spent much of their closing arguments painting Musk as not wanting to protect humanity from AGI, as he’s suggested, but wanting to be the one who controls it.\u003c/p>\n\u003cp>They allege Musk brought his suit after he tried to wrest control of a potential for-profit arm of OpenAI, and later absorb the organization into Tesla, in 2017. The executives had begun discussing a for-profit expansion that year to solicit more funding for top talent and “compute” to compete with other industry leaders.\u003c/p>\n\u003cp>Musk departed OpenAI in February 2018, after a falling-out with the other executives over those discussions.\u003c/p>\n\u003cfigure id=\"attachment_12075382\" class=\"wp-caption aligncenter\" style=\"max-width: 2000px\">\u003cimg loading=\"lazy\" decoding=\"async\" class=\"size-full wp-image-12075382\" src=\"https://cdn.kqed.org/wp-content/uploads/sites/10/2026/03/260304-Elon-Musk-Trial-01-KQED.jpg\" alt=\"\" width=\"2000\" height=\"1333\" srcset=\"https://cdn.kqed.org/wp-content/uploads/sites/10/2026/03/260304-Elon-Musk-Trial-01-KQED.jpg 2000w, https://cdn.kqed.org/wp-content/uploads/sites/10/2026/03/260304-Elon-Musk-Trial-01-KQED-160x107.jpg 160w, https://cdn.kqed.org/wp-content/uploads/sites/10/2026/03/260304-Elon-Musk-Trial-01-KQED-1536x1024.jpg 1536w\" sizes=\"auto, (max-width: 2000px) 100vw, 2000px\">\u003cfigcaption 
class=\"wp-caption-text\">A courtroom sketch of Elon Musk on the stand as he’s questioned by the plaintiff’s attorney, Aaron P. Arnzen, on March 4, 2026. Musk is accused of making false and misleading statements that drove down Twitter’s stock price before he bought the social media platform for $44 billion in 2022. \u003ccite>(Vicki Behringer for KQED)\u003c/cite>\u003c/figcaption>\u003c/figure>\n\u003cp>Shortly after, OpenAI decided to introduce a for-profit public benefit corporation. It has since become a $850 billion company, and is considering an initial public offering estimated at up to a trillion dollars.\u003c/p>\n\u003cp>OpenAI’s attorneys alleged that Musk saw OpenAI’s skyrocketing success and filed his suit to destroy a competitor in the field.\u003c/p>\n\u003cp>“The truth is that Mr. Musk wanted a for-profit AI, and he wanted to dominate it,” Eddy said.\u003c/p>\n\u003cp>The jury is set to begin deliberations on Monday. If they side with Musk, OpenAI and Microsoft could owe $150 billion in damages to be redirected to the nonprofit foundation, along with a court order dismantling OpenAI’s for-profit structure and removal of Altman and Brockman from their posts.\u003c/p>\n\u003cfigure id=\"attachment_12083616\" class=\"wp-caption aligncenter\" style=\"max-width: 2000px\">\u003cimg loading=\"lazy\" decoding=\"async\" class=\"size-full wp-image-12083616\" src=\"https://cdn.kqed.org/wp-content/uploads/sites/10/2026/05/Gonzalez-Rogers-Yvonne.jpg\" alt=\"\" width=\"2000\" height=\"1334\" srcset=\"https://cdn.kqed.org/wp-content/uploads/sites/10/2026/05/Gonzalez-Rogers-Yvonne.jpg 2000w, https://cdn.kqed.org/wp-content/uploads/sites/10/2026/05/Gonzalez-Rogers-Yvonne-160x107.jpg 160w, https://cdn.kqed.org/wp-content/uploads/sites/10/2026/05/Gonzalez-Rogers-Yvonne-1536x1025.jpg 1536w\" sizes=\"auto, (max-width: 2000px) 100vw, 2000px\">\u003cfigcaption class=\"wp-caption-text\">Judge Yvonne Gonzalez Rogers of the U.S. 
District Court for the Northern District of California. \u003ccite>(Courtesy of Daily Journal)\u003c/cite>\u003c/figcaption>\u003c/figure>\n\u003cp>The jury will not have the final say, though. In a rare, but not unprecedented, move, U.S. District Judge Yvonne Gonzalez Rogers will have the ultimate right to rule on the claims.\u003c/p>\n\u003cp>According to Charlie Bullock, a senior research fellow at the Institute for Law and AI, this is because most times, “equitable claims” — breach of charitable trust and unjust enrichment — which involve non-monetary remedies, are decided by a judge.\u003c/p>\n\u003cp>In this case, Gonzalez Rogers elected to have an advisory jury, and Bullock said that typically, judges choose to go along with their decision.\u003c/p>\n\u003cp>\u003c/p>\n\u003c/div>\u003c/p>",
"attributes": {
"named": {},
"numeric": []
}
}
],
"link": "/news/12083612/lawyers-for-elon-musk-and-sam-altman-make-their-final-case-in-openai-trial",
"authors": [
"11913",
"251"
],
"categories": [
"news_31795",
"news_28250",
"news_8",
"news_248"
],
"tags": [
"news_34755",
"news_1386",
"news_32668",
"news_3897",
"news_21891",
"news_34054",
"news_33542",
"news_33543",
"news_34586",
"news_1631"
],
"featImg": "news_12083803",
"label": "news"
},
"news_12083428": {
"type": "posts",
"id": "news_12083428",
"meta": {
"index": "posts_1716263798",
"site": "news",
"id": "12083428",
"score": null,
"sort": [
1778666407000
]
},
"guestAuthors": [],
"slug": "how-an-onlyfans-model-and-a-cosplayer-are-fighting-nonconsensual-deepfake-porn",
"title": "How an OnlyFans Model and a Cosplayer Are Fighting Nonconsensual Deepfake Porn",
"publishDate": 1778666407,
"format": "audio",
"headTitle": "How an OnlyFans Model and a Cosplayer Are Fighting Nonconsensual Deepfake Porn | KQED",
"labelTerm": {},
"content": "\u003cp>\u003ca href=\"#episode-transcript\">\u003ci>View the full episode transcript.\u003c/i>\u003c/a>\u003c/p>\n\u003cp>\u003cspan style=\"font-weight: 400\">We’re \u003c/span>\u003cspan style=\"font-weight: 400\">diving into the world of nonconsensual deepfake porn and why this problem reaches far beyond influencers and sex workers.\u003c/span>\u003cspan style=\"font-weight: 400\">\u003cbr>\n\u003c/span>\u003cspan style=\"font-weight: 400\">\u003cbr>\n\u003c/span>\u003cspan style=\"font-weight: 400\">When users on X started asking Grok to generate explicit images of real women and girls without their consent, Twitch streamer and OnlyFans creator Morgpie watched the harassment spiral in real time. Cosplayer and software engineer Zander Small saw firsthand how nonconsensual images affected his girlfriend, a SFW creator, and her friends. The two decided to team up to build tools that help creators detect leaks, remove deepfakes, and reclaim control over their images online.\u003c/span>\u003c/p>\n\u003cp>\u003cb>Note:\u003c/b>\u003cspan style=\"font-weight: 400\"> This episode contains mentions of gender-based violence and nonconsensual intimate imagery, which may be triggering for some listeners. 
\u003c/span>\u003c/p>\n\u003cp>\u003c!-- iframe plugin v.4.3 wordpress.org/plugins/iframe/ -->\u003cbr>\n\u003ciframe loading=\"lazy\" frameborder=\"0\" height=\"200\" scrolling=\"no\" src=\"https://playlist.megaphone.fm?e=KQINC5643980688\" width=\"100%\" class=\"iframe-class\">\u003c/iframe>\u003c/p>\n\u003cp>\u003cstrong>Guest:\u003c/strong>\u003c/p>\n\u003cul>\n\u003cli>\u003ca href=\"https://www.instagram.com/bigguswombus/\">\u003cspan style=\"font-weight: 400\">Morgpie\u003c/span>\u003c/a>\u003cspan style=\"font-weight: 400\">, OnlyFans creator and cofounder of Fanlock\u003c/span>\u003c/li>\n\u003cli>\u003ca href=\"https://www.instagram.com/zander_smalls/\">\u003cspan style=\"font-weight: 400\">Zander Small\u003c/span>\u003c/a>\u003cb>, \u003c/b>\u003cspan style=\"font-weight: 400\">content creator and cofounder of Fanlock\u003c/span>\u003c/li>\n\u003c/ul>\n\u003cp>\u003cb>Further Reading/Listening:\u003c/b>\u003c/p>\n\u003cul>\n\u003cli>\u003ca href=\"https://www.dexerto.com/twitch/influencers-take-on-ai-deepfakes-with-new-creator-protection-agency-3324719/\">\u003cspan style=\"font-weight: 400\">Influencers take on AI deepfakes with their own creator protection agency\u003c/span>\u003c/a>\u003cspan style=\"font-weight: 400\"> — Virginia Glaze, \u003c/span>\u003ci>\u003cspan style=\"font-weight: 400\">Dextero\u003c/span>\u003c/i>\u003c/li>\n\u003cli>\u003ca href=\"https://www.nbcnews.com/tech/tech-news/musks-ai-chatbot-grok-xai-making-sexual-deepfakes-imagine-rcna265855\">\u003cspan style=\"font-weight: 400\">Musk’s Grok AI chatbot is still making sexual deepfakes, despite X’s promise to stop it\u003c/span>\u003c/a>\u003cspan style=\"font-weight: 400\"> — David Ingram, \u003c/span>\u003ci>\u003cspan style=\"font-weight: 400\">NBC News\u003c/span>\u003c/i>\u003c/li>\n\u003cli>\u003ca href=\"https://www.wired.com/story/deepfake-nudify-schools-global-crisis/\">\u003cspan style=\"font-weight: 400\">The Deepfake Nudes Crisis in Schools Is Much Worse Than You 
Thought\u003c/span>\u003c/a>\u003cspan style=\"font-weight: 400\"> — Matt Burgess, \u003c/span>\u003ci>\u003cspan style=\"font-weight: 400\">WIRED\u003c/span>\u003c/i>\u003c/li>\n\u003cli>\u003ca href=\"https://19thnews.org/2025/05/take-it-down-act-signing-explicit-images\">\u003cspan style=\"font-weight: 400\">Take It Down Act: How to use it to remove revenge porn\u003c/span>\u003c/a>\u003cspan style=\"font-weight: 400\"> — Jasmine Mithani, \u003c/span>\u003ci>\u003cspan style=\"font-weight: 400\">The 19th\u003c/span>\u003c/i>\u003c/li>\n\u003cli>\u003ca href=\"https://rainn.org/rainns-recommendations-for-legislators/image-based-sexual-abuse-laws-combat-nonconsensual-ai-deepfakes/\">\u003cspan style=\"font-weight: 400\">Image-Based Sexual Abuse Laws: Combat Nonconsensual AI Deepfakes\u003c/span>\u003c/a>\u003cspan style=\"font-weight: 400\"> — \u003c/span>\u003ci>\u003cspan style=\"font-weight: 400\">RAINN\u003c/span>\u003c/i>\u003c/li>\n\u003cli>\u003ca href=\"https://rainn.org/get-informed/issues/ai-tech-enabled-sexual-abuse/\">\u003cspan style=\"font-weight: 400\">AI & Tech-Enabled Sexual Abuse: Risk & Prevention\u003c/span>\u003c/a>\u003cspan style=\"font-weight: 400\"> — \u003c/span>\u003ci>\u003cspan style=\"font-weight: 400\">RAINN\u003c/span>\u003c/i>\u003cbr>\n\u003ca href=\"https://deepstrike.io/blog/deepfake-statistics-2025\">\u003cspan style=\"font-weight: 400\">Deepfake Statistics 2025: AI Fraud Data & Trends\u003c/span>\u003c/a>\u003cspan style=\"font-weight: 400\"> — Mohammed Khalil, \u003c/span>\u003ci>\u003cspan style=\"font-weight: 400\">DeepStrike\u003c/span>\u003c/i>\u003c/li>\n\u003c/ul>\n\u003cp>Want to give us feedback on the show? 
Shoot us an email at \u003ca href=\"mailto:CloseAllTabs@KQED.org\">CloseAllTabs@KQED.org\u003c/a>\u003c/p>\n\u003cp>\u003cspan style=\"font-weight: 400\">Follow us on\u003c/span>\u003ca href=\"https://www.instagram.com/closealltabspod/\"> \u003cspan style=\"font-weight: 400\">Instagram\u003c/span>\u003c/a>\u003cspan style=\"font-weight: 400\"> and\u003c/span>\u003ca href=\"https://www.tiktok.com/@closealltabs\"> \u003cspan style=\"font-weight: 400\">TikTok\u003c/span>\u003c/a>\u003c/p>\n\u003cp>[ad fullwidth]\u003c/p>\n\u003ch2 id=\"episode-transcript\">Episode Transcript\u003c/h2>\n\u003cp>\u003ci>\u003cspan style=\"font-weight: 400\">This is a computer-generated transcript. While our team has reviewed it, there may be errors.\u003c/span>\u003c/i>\u003c/p>\n\u003cp>\u003cb>Morgan Sung: \u003c/b>\u003cspan style=\"font-weight: 400\">Are you closing your tabs? You can be honest, this is a safe space. If you’re a fan of Close All Tabs and you want more of it, then please rate and review us on Spotify, Apple Podcasts, or wherever you listen to the show. And tell your friends about us. It would be such a huge help to get the word out.\u003c/span>\u003c/p>\n\u003cp>\u003cspan style=\"font-weight: 400\">Okay, let’s get to the show. Just a note, this episode contains mentions of gender-based violence and non-consensual intimate imagery, which may be triggering for some listeners.\u003c/span>\u003c/p>\n\u003cp>\u003cspan style=\"font-weight: 400\">So, you know Grok? It’s the AI chatbot integrated with X, the social media app formerly known as Twitter and now owned by Elon Musk. Well, since late last year, Grok has been embroiled in an undressing scandal, generating sexually explicit images of people without their consent. The majority of targets were women. Some were minors, young girls. For a few weeks, it was a pretty disgusting widespread trend. 
When women or even teenage girls posted fully clothed photos of themselves on X, other users would comment and tag Grok, asking it to ‘put her in a bikini’ or ‘take off her top.’ The chatbot would publicly respond with a generated lewd or completely naked image of the subject. Some users went even further, asking Grok to add blood and bruises, prompting the chatbot to generate graphic, sexually violent images of these women.\u003c/span>\u003c/p>\n\u003cp>\u003cb>Morgpie: \u003c/b>\u003cspan style=\"font-weight: 400\">Oh man, it was very much like I was waking up every day and I didn’t want to post.\u003c/span>\u003c/p>\n\u003cp>\u003cb>Morgan Sung: \u003c/b>\u003cspan style=\"font-weight: 400\">This is Morgpie, a Twitch streamer and OnlyFans creator. People who know her IRL call her Morgan. She’s been a porn actress for years, and as someone who makes sexually explicit content, she’s used to creeps harassing her with her own nudes. But the Grok undressing trend really unsettled her. It was the worst in January.\u003c/span>\u003c/p>\n\u003cp>\u003cb>Morgpie: \u003c/b>\u003cspan style=\"font-weight: 400\">Being looped in with something that is so violating, and like you said, something that’s even affecting minors is just disgusting. Every day I was going into my comments and just like hiding replies and blocking because I’m like, I’m not going to let you guys just generate these images of me that I did not consent to, especially if it’s being associated with basically creating child pornography on Twitter.\u003c/span>\u003c/p>\n\u003cp>\u003cb>Morgan Sung: \u003c/b>\u003cspan style=\"font-weight: 400\">This was non-consensual, intimate imagery, more commonly known as deep fake porn. A deep fake is content that has been generated or manipulated by AI to imitate someone else. 
Zander Small, another content creator and a friend of Morgan’s, says that the proliferation of AI tools has started to seriously affect content creators, regardless of whether or not they make adult content.\u003c/span>\u003c/p>\n\u003cp>\u003cb>Zander Small: \u003c/b>\u003cspan style=\"font-weight: 400\">Deep fakes can be anything from deep fake explicit imagery with like, a creator doing something or nude content that they didn’t consent to. Or it could be stuff as simple as like, an audio deep fake where a creator is saying something that they don’t consent too, which might have repercussions of them being canceled or stuff that they just obviously wouldn’t consent to saying.\u003c/span>\u003c/p>\n\u003cp>\u003cb>Morgan Sung: \u003c/b>\u003cspan style=\"font-weight: 400\">Morgan hasn’t had to deal with deep fake porn of herself as much. After years of being in this industry, she’s developed thick skin. She’s mostly dealt with leaks, or explicit content that she posted behind a paywall that was illegally downloaded and posted elsewhere, without her consent. But the Grok trend is just the tip of the iceberg. Non-consensual deep fake-porn has exploded over the last few years.\u003c/span>\u003c/p>\n\u003cp>\u003cb>Morgpie: \u003c/b>\u003cspan style=\"font-weight: 400\">I think that for a lot of people, the lack of consent is very attractive.\u003c/span>\u003c/p>\n\u003cp>\u003cb>Morgan Sung: \u003c/b>\u003cspan style=\"font-weight: 400\">This is an issue that overwhelmingly affects women, and these circles are not as fringe as you might think. An annual report last year by the cybersecurity firm DeepStrike found that roughly 97% of all deepfakes online fall under non-consensual intimate imagery, and that 99 to 100% of victims of deepfake pornography are women. 
Here’s Zander again.\u003c/span>\u003c/p>\n\u003cp>\u003cb>Zander Small: \u003c/b>\u003cspan style=\"font-weight: 400\">I think it is either fans, if you want to call them that, or just creeps on the internet, wanting to see more out of a creator than they consented to. I know it affects a lot of SFW creators.\u003c/span>\u003c/p>\n\u003cp>\u003cb>Morgan Sung: \u003c/b>\u003cspan style=\"font-weight: 400\">SFW, or Safe for Work. They don’t show nudity or make sexually explicit content. While NSFW, Not Safe for Work, means adult content.\u003c/span>\u003c/p>\n\u003cp>\u003cb>Zander Small: \u003c/b>\u003cspan style=\"font-weight: 400\">Uh, you know, and I guess from that, you know, if a creator isn’t consenting to do more explicit content, then, you know, these, uh, I guess perpetrators, creeps, whatever you want to call them, you know, take into their hands to do it themselves. And it’s incredibly easy to deep fake content and, you know, as models get better and better and they get quicker and quicker, it doesn’t really require as much of sophisticated technology to run these models.\u003c/span>\u003c/p>\n\u003cp>\u003cb>Morgan Sung: \u003c/b>\u003cspan style=\"font-weight: 400\">Some of the mainstream models, ChatGPT, Gemini, Claude, have guardrails that are supposed to prevent them from generating deep fake porn. In January, X announced that it implemented technological measures to prevent Grok from modifying images of real people in revealing clothing. But there are ways to get around these guardrails. Just last month, NBC News reported that Grok is still generating deep-fake porn of real women. And like Zander said, there are so many other models out there that just don’t have these guardrails in the first place.\u003c/span>\u003c/p>\n\u003cp>\u003cspan style=\"font-weight: 400\">Zander hasn’t had to deal with deep fake porn of himself, but he’s seen how much it’s affected people he’s close to, other safe-for-work creators who don’t make explicit content. 
And Morgan, coming from the porn industry, has seen how this issue affects her fellow adult content creators.\u003c/span>\u003c/p>\n\u003cp>\u003cspan style=\"font-weight: 400\">So late last year, they teamed up to come up with a solution for other creators. Today, we’re diving into the seedy reality of non-consensual deepfake porn, when it got so bad, why it’s so hard to stop, and how two Gen Z content creators are trying to tackle it. Ready?\u003c/span>\u003c/p>\n\u003cp>\u003cspan style=\"font-weight: 400\">This is Close All Tabs. I’m Morgan Sung, tech journalist and your chronically online friend, here to open as many browser tabs as it takes to help you understand how the digital world affects our real lives. Let’s get into it.\u003c/span>\u003c/p>\n\u003cp>\u003cspan style=\"font-weight: 400\">Let’s open our first tab: the reality of non-consensual deep fake porn. Morgan is an award-winning porn creator. Literally, she has multiple Pornhub awards. And when she started years ago, the internet was very different.\u003c/span>\u003c/p>\n\u003cp>\u003cb>Morgpie: \u003c/b>\u003cspan style=\"font-weight: 400\">It’s very interesting because when I first started, the climate was very much like, if you opened up Twitter, you would see tweets that are like, ‘sex work is real work.’ Of course, this was kind of around the time when OnlyFans was only just emerging barely.\u003c/span>\u003c/p>\n\u003cp>\u003cb>Morgan Sung: \u003c/b>\u003cspan style=\"font-weight: 400\">In the world of adult content, there was before OnlyFans, and then there’s after OnlyFans. The platform completely changed the game, lowering the barrier of entry for new creators and giving them new options to monetize their content. 
Morgan said that before OnlyFans blew up, the only way to make a living as an independent porn creator was to land on the front page of Pornhub, or actresses had to break into the industry by being part of studio productions where they didn’t have as much autonomy.\u003c/span>\u003c/p>\n\u003cp>\u003cb>Morgpie: \u003c/b>\u003cspan style=\"font-weight: 400\">It’s very interesting the shift between whenever porn was basically widely available, you didn’t really have to pay much for it. When I first started, I was uploading to Pornhub, and that was full length, full scenes that you could see for free at any time. Whereas now, the climate has shifted a lot to where creators like myself have a lot more control. So we’re able to, you know, use OnlyFans as a platform where we are more connected with our audience and that is actually the main pull. Now we’re in this age where these models can kind of take a bit of that control back. They can control what content they make, how much they sell it for. And I think that that plays so much into like the conversation about deepfakes where it’s about control. It’s all about consent. And then with deepfakes, you can make anybody do anything. So you have the control over this other person.\u003c/span>\u003c/p>\n\u003cp>\u003cb>Morgan Sung: \u003c/b>\u003cspan style=\"font-weight: 400\">Since joining OnlyFans, Morgan and other adult creators have dealt with the same problem: leaks. They consent to paying subscribers accessing certain premium content that’s been posted behind a paywall. Then some unscrupulous subscriber downloads it and posts it publicly without their permission for the rest of the world to see. It was a constant source of frustration for Morgan.\u003c/span>\u003c/p>\n\u003cp>\u003cspan style=\"font-weight: 400\">And then about a year and a half ago, Morgan noticed the deep fakes. Her friends told her about how they stumbled across videos of themselves online, but it wasn’t really them. 
Someone had taken explicit content from behind their paywalls and modified it, morphing them into these scenarios that the creators never wanted to be in.\u003c/span>\u003c/p>\n\u003cp>\u003cb>Morgpie: \u003c/b>\u003cspan style=\"font-weight: 400\">Again, it all stems back to control. It’s like, ‘oh, you did this thing that I didn’t like. Well, look at this control I have over your image. I’m going to use that against you.’.\u003c/span>\u003c/p>\n\u003cp>\u003cb>Morgan Sung: \u003c/b>\u003cspan style=\"font-weight: 400\">I think some detractors would say, like, ‘oh, well, if you make explicit content, why does deep fake porn bother you? Or why do your leaks bother you?’ What would you say to them?\u003c/span>\u003c/p>\n\u003cp>\u003cb>Morgpie: \u003c/b>\u003cspan style=\"font-weight: 400\">I mean, it’s it’s all about consent. That’s like saying, ‘oh, because you make porn, if I see you on the street, I can sexually assault you.’ You know, it’s like, consent is a very real thing. And there’s a big difference between me in the comfort of my own home within my own boundaries, producing content that I enjoy, and somebody else taking these things and making content that I didn’t consent to be in.\u003c/span>\u003c/p>\n\u003cp>\u003cb>Morgan Sung: \u003c/b>\u003cspan style=\"font-weight: 400\">It’s not just Morgan and her fellow porn actresses dealing with this. Women who don’t make explicit content are also subjected to this harassment. One of the most well-known cases of this was when Atrioc, a Twitch streamer, was live. During his stream, he showed his open tabs for a split second, and one of them included deep fake porn of his own friends and streaming colleagues. He was caught buying this content. QTCinderella, another streamer was one of Atriok’s close friends. 
She was also a victim of the deep fake porn he bought.\u003c/span>\u003c/p>\n\u003cp>\u003ci>\u003cspan style=\"font-weight: 400\">[Audio Clip of QTCinderella]\u003c/span>\u003c/i>\u003cb>\u003cbr>\n\u003c/b>\u003ci>\u003cspan style=\"font-weight: 400\">Atrioc for showing it to thousands of people, the people DMing me pictures of myself from that website, f*ck you all!\u003c/span>\u003c/i>\u003c/p>\n\u003cp>\u003cb>Morgpie: \u003c/b>\u003cspan style=\"font-weight: 400\">Pokimane is like a great example of this.\u003c/span>\u003c/p>\n\u003cp>\u003cb>Morgan Sung: \u003c/b>\u003cspan style=\"font-weight: 400\">Pokimane is another Twitch streamer who was also a victim of Atrioc’s deep fake porn purchases. She does not make explicit adult content, but as a woman existing online, she deals with harassment constantly. Like, here she is reading comments from her audience.\u003c/span>\u003c/p>\n\u003cp>\u003ci>\u003cspan style=\"font-weight: 400\">[Audio clip of Pokimane]\u003c/span>\u003c/i>\u003c/p>\n\u003cp>\u003ci>\u003cspan style=\"font-weight: 400\">Yo yo yo, let’s see some ass. This ain’t a club fam, this is just my Twitch chat.\u003c/span>\u003c/i>\u003c/p>\n\u003cp>\u003cb>Morgpie: \u003c/b>\u003cspan style=\"font-weight: 400\">There are so many clips of her literally just getting up and standing up out of her chair and that’ll get clipped and posted all over Twitter. And all of Twitter is like, ‘look at what she’s doing. She’s gooner baiting!’.\u003c/span>\u003c/p>\n\u003cp>\u003cb>Morgan Sung: \u003c/b>\u003cspan style=\"font-weight: 400\">Goonerbait started as a term to describe video games or anime that aren’t pornographic but contain a lot of sexual imagery like jiggle physics and very scantily clad female characters. It’s media designed to appeal to gooners. Gooners are porn addicts. 
And now, internet randos love to accuse real-life women of gooner baiting, mainly female streamers like Pokimane.\u003c/span>\u003c/p>\n\u003cp>\u003cb>Morgpie: \u003c/b>\u003cspan style=\"font-weight: 400\">‘She’s, you know, performing for her male audience.’ And it’s like, well, is she really doing anything? She kind of just got up and walked out of the room, but they’re like, ‘oh, her pants are a little too tight.’ So it’s, like, I think this idea of a woman that’s kind of, just not really even doing anything, a lot of people love to just over-sexualize.\u003c/span>\u003c/p>\n\u003cp>\u003cb>Morgan Sung: \u003c/b>\u003cspan style=\"font-weight: 400\">In some online circles, there is the sentiment that women like Morgan deserve to be deepfaked because they already make porn, and that women, like Pokimane, also deserve to deepfake because they’re somehow gooner baiting. It even affects people who don’t post online. Non-consensual deepfakes are rampant in schools. A Wired investigation last month found that high school boys have targeted their fellow classmates by spreading fake, generated nudes of them. These are teenage girls.\u003c/span>\u003c/p>\n\u003cp>\u003cb>Morgpie: \u003c/b>\u003cspan style=\"font-weight: 400\">Yeah, the thing is, it’s not going to stop with sex workers. As much as the sentiment these days is very anti-sex work, like, ‘oh, if you make this content, you’re kind of putting yourself up to be distributed in this way.’ But the thing it is, is it’s 100% a slippery slope and it’s going to keep going into Twitch streamers who are known and even just normal people. There’s nothing stopping anybody from pulling up somebody’s Facebook profile, just a normal person who doesn’t produce any content whatsoever, and making explicit deep fakes of them and distributing them. And that can be used as blackmail. 
The possibilities there are quite literally endless in terms of the harm that they could cause for everybody.\u003c/span>\u003c/p>\n\u003cp>\u003cb>Morgan Sung: \u003c/b>\u003cspan style=\"font-weight: 400\">You’ve talked about spending so much money on deepfake takedowns, but how did you initially try to tackle this problem of deepfakes and leaked content?\u003c/span>\u003c/p>\n\u003cp>\u003cb>Morgpie: \u003c/b>\u003cspan style=\"font-weight: 400\">I was going in every single week and I was Googling my name and I was going on like Twitter, Reddit, all these other sites, just like searching for my name, um, and seeing pages and pages and pages of all this leaked content that would come up. And back then I was paying over a thousand dollars a month on these takedowns, but I would still have to go in and manually report a lot of stuff. You shouldn’t really have to go in and look at your own leaks and your own deep fakes, which is just awful.\u003c/span>\u003c/p>\n\u003cp>\u003cb>Morgan Sung: \u003c/b>\u003cspan style=\"font-weight: 400\">Morgan was at her wits end. And then, late last year, she saw that Zander was working on a project that may be able to solve her problem. And she wanted to help. We’ll hear Zander’s story after the break.\u003c/span>\u003c/p>\n\u003cp>\u003cspan style=\"font-weight: 400\">But first, we wanted to remind you that Close All Tabs depends on listeners like you to keep us going. You can support us by becoming a member at donate.kqed.org slash podcasts. Okay, back to the story after the break. Stick around.\u003c/span>\u003c/p>\n\u003cp>\u003cspan style=\"font-weight: 400\">We’re back. Now, let’s open that new tab: What is Fanlock?\u003c/span>\u003c/p>\n\u003cp>\u003cspan style=\"font-weight: 400\">Zander had started out as a Minecraft YouTuber back in high school. It was a fun thing he did on the side before he went to college to study software engineering. He was on his high school robotics team and loved tinkering and fixing things. 
A few years ago, during his sophomore year, he started going to anime conventions with his friends. Here’s the thing, Zander’s really tall. He’s 6’8″. His friend pointed out that he could carve out a real niche as a comically tall cosplayer. He pushed Zander to start posting.\u003c/span>\u003c/p>\n\u003cp>\u003cb>Zander Small: \u003c/b>\u003cspan style=\"font-weight: 400\">He was like ‘Bro, it’s gonna be like viral because like, oh my gosh, why is a Gojo cosplayer like as tall as like LeBron James?’ So I did it and it did pretty good. And I guess it just snowballed from there and I just haven’t stopped since.\u003c/span>\u003c/p>\n\u003cp>\u003cb>Morgan Sung: \u003c/b>\u003cspan style=\"font-weight: 400\">About six months after he went viral as comically tall Gojo, he started getting brand deals from anime companies. He gained hundreds of thousands of followers. He flew all over the country, attending cons and meetups. He even hosted a few lookalike competitions. There’s a picture of the Hatsune Miku lookalike competition he hosted. A gaggle of cosplayers in turquoise wigs, and then Zander, towering above the crowd in his own turquoise getup. Of course, he was still in school juggling a burgeoning full-time career as a content creator while also attending classes and doing homework and studying for exams.\u003c/span>\u003c/p>\n\u003cp>\u003cspan style=\"font-weight: 400\">He considered dropping out, but his parents really, really wanted him to stay in school. They weren’t thrilled at the idea of their son leaving an engineering degree to pursue anime content. So he stuck it out, and last year, while finishing up his last semester of school, he stumbled across this deep fake problem. It struck a very personal chord.\u003c/span>\u003c/p>\n\u003cp>\u003cb>Zander Small: \u003c/b>\u003cspan style=\"font-weight: 400\">So about a month before I graduated, my girlfriend, who’s an SFW creator, had a huge deep fake problem. 
Um, you know, there’s accounts popping up on like Threads or Instagram that either use her likeness or just full on non-consensual porn, uh, deep fakes of her, which is super mentally taxing, uh on her, you know, as an SF W creator. You know, she didn’t consent to being in those positions or having these account to DM her fans, like, ‘Hey, send me $400 and we’ll go on a date,’ type of just scam content. So it was from there that I was like, let me see what’s up and see if I can help you. So that’s when I really took a deep dive into DMCA, non-consensual imagery and depending on the platform it’s on what you can do about that.\u003c/span>\u003c/p>\n\u003cp>\u003cb>Morgan Sung: \u003c/b>\u003cspan style=\"font-weight: 400\">DMCA, as in the Digital Millennium Copyright Act. It’s copyright law for internet content.\u003c/span>\u003c/p>\n\u003cp>\u003cb>Zander Small: \u003c/b>\u003cspan style=\"font-weight: 400\">And I was able to get a lot of her stuff down, which was great. Uh, but then at that point it was like, you know, what are the other players in the space doing about this?\u003c/span>\u003c/p>\n\u003cp>\u003cb>Morgan Sung: \u003c/b>\u003cspan style=\"font-weight: 400\">What he found were takedown tools that were very expensive and not that effective. While creator management firms and talent agencies have in-house services for this, they’re inaccessible to smaller creators. After Zander helped his girlfriend, her friends reached out to him. They had the same problem. And then their friends reached out. And all of this coincided with his post-graduation job search. He planned to at least try to use his degree. But the job market for entry-level software engineers was rough.\u003c/span>\u003c/p>\n\u003cp>\u003cb>Zander Small: \u003c/b>\u003cspan style=\"font-weight: 400\">I think by the third final round interview at like some fang company where they rejected me after four weeks and five interviews, I was just so fed up. I was like, you know what, screw this. 
I’m gonna just do this myself. I’ma make my own company. So, and at that time, you it’s like the overlap of like, oh, I figured out how to do this. I could help more creators like this and really solve a real problem.\u003c/span>\u003c/p>\n\u003cp>\u003cb>Morgan Sung: \u003c/b>\u003cspan style=\"font-weight: 400\">And so, he started working on it, a tool for creators that would scan the internet for leaked and deepfaked content and automatically send DMCA takedown requests. And if the sites didn’t comply, this tool would have to find other ways to force a takedown.\u003c/span>\u003c/p>\n\u003cp>\u003cspan style=\"font-weight: 400\">Zander knew how traumatic it was for his girlfriend and her friends to be constantly confronted with non-consensual deepfake porn. So, he wanted this tool to take down content automatically, without creators having to see it. And the tool also had to catch the non-consensual deepfakes before they spread to other platforms. But he knew he couldn’t do it alone. He needed the perspective of other creators for it to really work.\u003c/span>\u003c/p>\n\u003cp>\u003cb>Zander Small: \u003c/b>\u003cspan style=\"font-weight: 400\">I pretty much just posted on my close friends at some point, like, hey, I’m thinking about doing this as like an actual like business or something like that. If anyone will be down to just test it out for free and see how good like my, you know, scanning architecture and stuff like that is, let me know. And Morgan actually swiped up on the story and was like, hey, that actually sounds pretty neat. 
I’d be down.\u003c/span>\u003c/p>\n\u003cp>\u003cb>Morgan Sung: \u003c/b>\u003cspan style=\"font-weight: 400\">Morgan and Zander had met at TwitchCon a while back.\u003c/span>\u003c/p>\n\u003cp>\u003cb>Zander Small: \u003c/b>\u003cspan style=\"font-weight: 400\">And we hopped on a call and I was like, ‘would you be down to like do this with me?’ Cause like, I think it’d be pretty sick if we had like two creators doing it that know the problem. You know, Morgan knows firsthand, like the adult space, but as well as like a firsthand account of like leaks and deep fakes and you know, where they live and stuff like that. And you know I guess from there, it just was one of those things where it was like I think this could be a real player in the space and I’m really passionate about it.\u003c/span>\u003c/p>\n\u003cp>\u003cb>Morgan Sung: \u003c/b>\u003cspan style=\"font-weight: 400\">Morgan, what was it like for you to see that story?\u003c/span>\u003c/p>\n\u003cp>\u003cb>Morgpie: \u003c/b>\u003cspan style=\"font-weight: 400\">I’ve struggled with this stuff for so long. I know so many people that I could tap in on and get their feedback. My scope in this space is so wide because I’ve had my eggs in so many baskets online And that I knew that I would be able to bring a good perspective and good input.\u003c/span>\u003c/p>\n\u003cp>\u003cb>Morgan Sung: \u003c/b>\u003cspan style=\"font-weight: 400\">So they managed to raise $200,000, and with that, Morgan and Zander launched Fanlock earlier this year. Zander handles the technical side, making sure Fanlock works, and Morgan handles the creator side, managing outreach to other creators.\u003c/span>\u003c/p>\n\u003cp>\u003cb>Zander Small: \u003c/b>\u003cspan style=\"font-weight: 400\">I guess I get to apply that degree that I was considering dropping out to do content for. And it’s, I guess like a full 360, you know, everyone that was like, you should stay in school and finish it out. 
I guess it came back to be useful because now I can apply it to helping my friends and other people in the space with this really real problem that they have.\u003c/span>\u003c/p>\n\u003cp>\u003cb>Morgan Sung: \u003c/b>\u003cspan style=\"font-weight: 400\">This solution isn’t that straightforward though. That’s a new tab: Why is it so hard to take down deepfakes?\u003c/span>\u003c/p>\n\u003cp>\u003cspan style=\"font-weight: 400\">In May last year, President Trump signed the Take It Down Act, a landmark law that criminalizes the publication of non-consensual intimate imagery, including digital forgeries, aka deep fake porn. It’s one of Congress’s first bipartisan actions to tackle AI-generated content. The law also requires online platforms to implement a removal request system and to take down deep fake porn within 48 hours of a request.\u003c/span>\u003c/p>\n\u003cp>\u003cb>Zander Small: \u003c/b>\u003cspan style=\"font-weight: 400\">A lot of these sites thankfully already had like forms or different reporting mechanisms to report deepfakes, but I think with this act itself, it’s a really good step in the right direction to combat non-consensual deepfake and, you know, props to the government for doing something right for once and actually passing this really quickly.\u003c/span>\u003c/p>\n\u003cp>\u003cb>Morgan Sung: \u003c/b>\u003cspan style=\"font-weight: 400\">So the Take It Down Act is only enforceable under U.S. jurisdiction, although the EU also has similar laws. But a lot of these sites are based outside of these places, like in Russia.\u003c/span>\u003c/p>\n\u003cp>\u003cb>Zander Small: \u003c/b>\u003cspan style=\"font-weight: 400\">Yeah, so for like Russia and Chinese sites, it gets a lot harder because they don’t have any need to comply either like deep fake penalties or DMCA because it’s specifically like USA, EU jurisdiction typically. 
And that makes it a lot harder to get content down off those sites if it’s even possible at all.\u003c/span>\u003c/p>\n\u003cp>\u003cspan style=\"font-weight: 400\">There’s a few things you can do for these sites. There’s been some sites I know firsthand that they use, let’s say, a USA-based company for their notification system. We’re able to submit basically a DMCA to those companies, basically being like, hey, just so you know, you’re aiding in copyright infringement by working with this client. If we were to take it a step further, we could always issue a DMCA subpoena to them if they use Google Analytics, for example, to straight to Google. And that would help us get more information about… The actual emails of the site, who this person actually is. So if they’re in the EU or USA, we can take those legal routes. Obviously there’s sites I know that are pretty much, they’re built from the ground up for piracy and it’s pretty much impossible to get those stuff down.\u003c/span>\u003c/p>\n\u003cp>\u003cb>Morgan Sung: \u003c/b>\u003cspan style=\"font-weight: 400\">One of the more difficult aspects of tackling deepfakes is catching them before Google indexes them, basically, storing web pages in its own database so they appear in search results. Because when something appears in search results, it spreads like wildfire.\u003c/span>\u003c/p>\n\u003cp>\u003cspan style=\"font-weight: 400\">Google updated its search functions a few years ago to identify deepfakes and prevent them from appearing at the top of search results, but there are still deepfakes that slip through the cracks. Zander said that Fanlock keeps tabs on specific sites that have histories of hosting non-consensual deepfakes. 
They scan them and send takedown demands, before they hit Google search results.\u003c/span>\u003c/p>\n\u003cp>\u003cb>Zander Small: \u003c/b>\u003cspan style=\"font-weight: 400\">You know, no one wants their family Googling them or something and they see deep fakes of them all over Google Images.\u003c/span>\u003c/p>\n\u003cp>\u003cb>Morgan Sung: \u003c/b>\u003cspan style=\"font-weight: 400\">I know Fanlock also relies on a lot of facial recognition technology to identify leaked content and deepfakes. Obviously, this technology is very controversial. It’s often used in law enforcement and has a lot connections to surveillance. But what are your thoughts on this use of facial-recognition technology?\u003c/span>\u003c/p>\n\u003cp>\u003cb>Zander Small: \u003c/b>\u003cspan style=\"font-weight: 400\">Yeah, I mean, obviously, if a creator signs up for our platform and we’re doing it in a consensual manner, I think that’s great. I obviously am big anti-surveillance, but I think the the key word at the end of the day is just consent, which is like the fundamental problem that I think these creators are having. And if they’re consenting to a service to take down stuff that was made non-consensually, I think, that’s why our creators are okay with it. And I think there’s a big differentiation between that and then, you know, some tech company scanning my face to see if I’m a criminal or something like that.\u003c/span>\u003c/p>\n\u003cp>\u003cb>Morgan Sung: \u003c/b>\u003cspan style=\"font-weight: 400\">I want to talk about some of the technical challenges that still exist. You mentioned trying to build a Telegram scanner right now. A lot of non-consensual deep fake porn is passed around in closed channels on Discord or group chats or Telegram. Do either of you have any experience with this happening? 
Like, what is the approach here?\u003c/span>\u003c/p>\n\u003cp>\u003cb>Zander Small: \u003c/b>\u003cspan style=\"font-weight: 400\">When we were building FanLock, I was like, Telegram is, like, the final boss of piracy. I really want to build a solution that while we can’t scan a hundred percent of Telegram, I want to build the absolute most, like I guess comprehensive Telegram scanner we can based on like what’s publicly available and what providers there are to us. So for Telegram, typically for like private groups and stuff like that, you’re able to join them if you have like a join link, which we’ve kind of gotten from people being like, ‘hey, I got leaks here, join my channel.’ And after we get the join link we’re able to figure out where copyrighted content is.\u003c/span>\u003c/p>\n\u003cp>\u003cspan style=\"font-weight: 400\">We already do have our Telegram scanner up. You know, we have about 11 million channels, you know, from our own services, but also third party providers that we use that have kind of indexed Telegram for us, which is great. Discord is a little bit trickier because it’s a TOS breach to use any sort of like bot activity on that.\u003c/span>\u003c/p>\n\u003cp>\u003cspan style=\"font-weight: 400\"> \u003c/span>\u003cb>Morgan Sung: \u003c/b>\u003cspan style=\"font-weight: 400\">TOS is Terms of Service, the contract between a platform like Telegram and its users.\u003c/span>\u003c/p>\n\u003cp>\u003cb>Zander Small: \u003c/b>\u003cspan style=\"font-weight: 400\">For now, like on Discord, if someone has a link that they’ve noticed that they want down, they can submit it to us and then we can do it from there. We currently don’t scan Discord because it is like a TOS breach to do, but we’re hoping as, like I said, as we grow that door can open.\u003c/span>\u003c/p>\n\u003cp>\u003cb>Morgan Sung: \u003c/b>\u003cspan style=\"font-weight: 400\">Overall, what hurdles still exist when it comes to taking down deepfakes? 
Like what’s the kind of like technical white whale you’re still chasing?\u003c/span>\u003c/p>\n\u003cp>\u003cb>Zander Small: \u003c/b>\u003cspan style=\"font-weight: 400\">Yeah, I’d say the biggest thing that we’re trying to roll out is actually identifying who leaked or who deep faked XYZ content. I think if we were able to do that, we might, I wouldn’t say solve the piracy problem, but definitely lower it. You know, we’re really hoping we can get in talks with, you know, platforms like OF, Fansly or Instagram and stuff like that, uh, to roll out a technology that we’re working on where basically it embeds like an invisible watermark into different images and stuff like that.\u003c/span>\u003c/p>\n\u003cp>\u003cspan style=\"font-weight: 400\">So if it is leaked or if it has deep faked or if someone else’s face has put on it, they’re able to know who exactly posted it based off this invisible embedded technology, which already exists for sites like Netflix. It’s how they track like video, uh, I guess leaks or, you know, from studios that maybe have like a trailer for the new Avengers movie and they want to track if it got leaked on X or anything like that.\u003c/span>\u003c/p>\n\u003cp>\u003cspan style=\"font-weight: 400\">I think if we’re able to get that done, like I feel like we’d significantly fix the problem and be a lot more proactive. Because I mean, if people start realizing, ‘oh shoot, if I leak or deep fake content, my account gets banned. Like, it’s going to really throw a wrench in the whole leak ecosystem. And that’s what we’re really trying to build towards right now.\u003c/span>\u003c/p>\n\u003cp>\u003cb>Morgan Sung: \u003c/b>\u003cspan style=\"font-weight: 400\">You’re coming from very different sides of the internet, kind of, whether in the safe work side or the adult content industry. But this is also a problem that deeply affects both of your spheres of the creator economy. 
How has the proliferation of deepfake porn changed the creator industry for you? And what would you say to someone who’s afraid to keep posting?\u003c/span>\u003c/p>\n\u003cp>\u003cb>Morgpie: \u003c/b>\u003cspan style=\"font-weight: 400\">The unfortunate thing is it’s such an uphill battle when it comes to deepfaked and leaked content, especially with AI getting as good as it is right now. But to somebody who is kind of scared to post right now, just know that there are people who are trying to find solutions to this kind of stuff.\u003c/span>\u003c/p>\n\u003cp>\u003cspan style=\"font-weight: 400\">And for these people who are generating this kind of content, it’s very much about their own sense of control. It doesn’t reflect you as a creator. You shouldn’t be afraid to post what you want because of this horrible threat of somebody taking your content and basically twisting it into something that you didn’t consent to. And hopefully our government can kind of catch up with this kind of stuff here pretty soon. But there are people like me and Zander who are trying to take real steps to help mitigate this.\u003c/span>\u003c/p>\n\u003cp>\u003cb>Zander Small: \u003c/b>\u003cspan style=\"font-weight: 400\">For creators, I’d say, you know, if you need to, you know, get anything you need for support on it, do it. You know, if you need to take a step back, do it. And then I’d say like, it’s a twofold thing where it’s like, don’t glamorize generative AI video and image content because that only speeds up the industry and then really push for better legislation and, you know, call your Senator, call your Congressman, like get it passed. Because It’s only going to get worse as it gets easier and it’s able to be done for more people. 
I think those are probably the two biggest things a creator can do right now that has like an actual like tangible impact to halt this problem or make it slow down at least.\u003c/span>\u003c/p>\n\u003cp>\u003cb>Morgan Sung: \u003c/b>\u003cspan style=\"font-weight: 400\">Well, thank you both so much for talking about all of this.\u003c/span>\u003c/p>\n\u003cp>\u003cb>Morgpie: \u003c/b>\u003cspan style=\"font-weight: 400\">Yeah, thank you for having us.\u003c/span>\u003c/p>\n\u003cp>\u003cb>Zander Small: \u003c/b>\u003cspan style=\"font-weight: 400\">Yeah, for sure.\u003c/span>\u003c/p>\n\u003cp>\u003cb>Morgan Sung: \u003c/b>\u003cspan style=\"font-weight: 400\">If you or someone you know has been targeted with deep fake porn, there are ways to have it removed. Fanlock also has free guides for creators navigating this problem. Check the show notes for more. We’ll link to a few resources about the Take It Down Act and how to remove non-consensual intimate imagery. For now, let’s close all these tabs.\u003c/span>\u003c/p>\n\u003cp>\u003cspan style=\"font-weight: 400\">Close All Tabs is a production of KQED Studios and is reported and hosted by me, Morgan Sung. This episode was produced by Chris Egusa, who also composed our theme song and credits music. It was edited by Chris Hambrick. The Close All tabs team also includes producer Maya Cueva and audio engineer, Brendan Willard. Additional music by APM. Audience engagement support from Maha Sanad. Jen Chien is our director of podcasts and Ethan Toven-Lindsey is our editor in chief.\u003c/span>\u003c/p>\n\u003cp>\u003cspan style=\"font-weight: 400\">Some members of the KQED podcast team are represented by the Screen Actors Guild, American Federation of Television and Radio Artists, San Francisco, Northern California local.\u003c/span>\u003c/p>\n\u003cp>\u003cspan style=\"font-weight: 400\">Keyboard sounds were recorded on my purple and pink dust silver K84 wired mechanical keyboard with Gateron red switches. 
Thanks for listening.\u003c/span>\u003c/p>\n\u003cp>\u003cspan style=\"font-weight: 400\"> \u003c/span>\u003c/p>\n\u003cp> \u003c/p>\n\u003cp>[ad floatright]\u003c/p>\n",
"blocks": [],
"excerpt": "Two gen Z content creators are fighting back against nonconsensual deepfake porn, a growing tool for harassment.",
"status": "publish",
"parent": 0,
"modified": 1778827306,
"stats": {
"hasAudio": true,
"hasVideo": false,
"hasChartOrMap": false,
"iframeSrcs": [],
"hasGoogleForm": false,
"hasGallery": false,
"hasHearkenModule": false,
"hasPolis": false,
"paragraphCount": 102,
"wordCount": 6194
},
"headData": {
"title": "How an OnlyFans Model and a Cosplayer Are Fighting Nonconsensual Deepfake Porn | KQED",
"description": "We’re diving into the world of nonconsensual deepfake porn and why this problem reaches far beyond influencers and sex workers.When users on X started asking Grok to generate explicit images of real women and girls without their consent, Twitch streamer and OnlyFans creator Morgpie watched the harassment spiral in real time. Cosplayer and software engineer Zander Small saw firsthand how nonconsensual images affected his girlfriend, a SFW creator, and her friends. The two decided to team up to build tools that help creators detect leaks, remove deepfakes, and reclaim control over their images online.",
"ogTitle": "",
"ogDescription": "",
"ogImgId": "",
"twTitle": "",
"twDescription": "",
"twImgId": "",
"socialDescription": "We’re diving into the world of nonconsensual deepfake porn and why this problem reaches far beyond influencers and sex workers.When users on X started asking Grok to generate explicit images of real women and girls without their consent, Twitch streamer and OnlyFans creator Morgpie watched the harassment spiral in real time. Cosplayer and software engineer Zander Small saw firsthand how nonconsensual images affected his girlfriend, a SFW creator, and her friends. The two decided to team up to build tools that help creators detect leaks, remove deepfakes, and reclaim control over their images online.",
"schema": {
"@context": "https://schema.org",
"@type": "NewsArticle",
"headline": "How an OnlyFans Model and a Cosplayer Are Fighting Nonconsensual Deepfake Porn",
"datePublished": "2026-05-13T03:00:07-07:00",
"dateModified": "2026-05-14T23:41:46-07:00",
"image": "https://cdn.kqed.org/wp-content/uploads/2020/02/KQED-OG-Image@1x.png",
"isAccessibleForFree": "True",
"publisher": {
"@type": "NewsMediaOrganization",
"@id": "https://www.kqed.org/#organization",
"name": "KQED",
"logo": "https://cdn.kqed.org/wp-content/uploads/2020/02/KQED-OG-Image@1x.png",
"url": "https://www.kqed.org",
"sameAs": [
"https://www.facebook.com/KQED",
"https://twitter.com/KQED",
"https://www.instagram.com/kqed/",
"https://www.tiktok.com/@kqedofficial",
"https://www.linkedin.com/company/kqed",
"https://www.youtube.com/channel/UCeC0IOo7i1P_61zVUWbJ4nw"
]
}
}
},
"primaryCategory": {
"termId": 33520,
"slug": "podcast",
"name": "Podcast"
},
"source": "Close All Tabs",
"sourceUrl": "https://www.kqed.org/podcasts/closealltabs",
"audioUrl": "https://traffic.megaphone.fm/KQINC5643980688.mp3?updated=1778638050",
"sticky": false,
"nprStoryId": "kqed-12083428",
"templateType": "standard",
"featuredImageType": "standard",
"excludeFromSiteSearch": "Include",
"articleAge": "0",
"path": "/news/12083428/how-an-onlyfans-model-and-a-cosplayer-are-fighting-nonconsensual-deepfake-porn",
"audioTrackLength": null,
"parsedContent": [
{
"type": "contentString",
"content": "\u003cdiv class=\"post-body\">\u003cp>\u003cp>\u003ca href=\"#episode-transcript\">\u003ci>View the full episode transcript.\u003c/i>\u003c/a>\u003c/p>\n\u003cp>\u003cspan style=\"font-weight: 400\">We’re \u003c/span>\u003cspan style=\"font-weight: 400\">diving into the world of nonconsensual deepfake porn and why this problem reaches far beyond influencers and sex workers.\u003c/span>\u003cspan style=\"font-weight: 400\">\u003cbr>\n\u003c/span>\u003cspan style=\"font-weight: 400\">\u003cbr>\n\u003c/span>\u003cspan style=\"font-weight: 400\">When users on X started asking Grok to generate explicit images of real women and girls without their consent, Twitch streamer and OnlyFans creator Morgpie watched the harassment spiral in real time. Cosplayer and software engineer Zander Small saw firsthand how nonconsensual images affected his girlfriend, a SFW creator, and her friends. The two decided to team up to build tools that help creators detect leaks, remove deepfakes, and reclaim control over their images online.\u003c/span>\u003c/p>\n\u003cp>\u003cb>Note:\u003c/b>\u003cspan style=\"font-weight: 400\"> This episode contains mentions of gender-based violence and nonconsensual intimate imagery, which may be triggering for some listeners. 
\u003c/span>\u003c/p>\n\u003cp>\u003c!-- iframe plugin v.4.3 wordpress.org/plugins/iframe/ -->\u003cbr>\n\u003ciframe loading=\"lazy\" frameborder=\"0\" height=\"200\" scrolling=\"no\" src=\"https://playlist.megaphone.fm?e=KQINC5643980688\" width=\"100%\" class=\"iframe-class\">\u003c/iframe>\u003c/p>\n\u003cp>\u003cstrong>Guest:\u003c/strong>\u003c/p>\n\u003cul>\n\u003cli>\u003ca href=\"https://www.instagram.com/bigguswombus/\">\u003cspan style=\"font-weight: 400\">Morgpie\u003c/span>\u003c/a>\u003cspan style=\"font-weight: 400\">, OnlyFans creator and cofounder of Fanlock\u003c/span>\u003c/li>\n\u003cli>\u003ca href=\"https://www.instagram.com/zander_smalls/\">\u003cspan style=\"font-weight: 400\">Zander Small\u003c/span>\u003c/a>\u003cb>, \u003c/b>\u003cspan style=\"font-weight: 400\">content creator and cofounder of Fanlock\u003c/span>\u003c/li>\n\u003c/ul>\n\u003cp>\u003cb>Further Reading/Listening:\u003c/b>\u003c/p>\n\u003cul>\n\u003cli>\u003ca href=\"https://www.dexerto.com/twitch/influencers-take-on-ai-deepfakes-with-new-creator-protection-agency-3324719/\">\u003cspan style=\"font-weight: 400\">Influencers take on AI deepfakes with their own creator protection agency\u003c/span>\u003c/a>\u003cspan style=\"font-weight: 400\"> — Virginia Glaze, \u003c/span>\u003ci>\u003cspan style=\"font-weight: 400\">Dextero\u003c/span>\u003c/i>\u003c/li>\n\u003cli>\u003ca href=\"https://www.nbcnews.com/tech/tech-news/musks-ai-chatbot-grok-xai-making-sexual-deepfakes-imagine-rcna265855\">\u003cspan style=\"font-weight: 400\">Musk’s Grok AI chatbot is still making sexual deepfakes, despite X’s promise to stop it\u003c/span>\u003c/a>\u003cspan style=\"font-weight: 400\"> — David Ingram, \u003c/span>\u003ci>\u003cspan style=\"font-weight: 400\">NBC News\u003c/span>\u003c/i>\u003c/li>\n\u003cli>\u003ca href=\"https://www.wired.com/story/deepfake-nudify-schools-global-crisis/\">\u003cspan style=\"font-weight: 400\">The Deepfake Nudes Crisis in Schools Is Much Worse Than You 
Thought\u003c/span>\u003c/a>\u003cspan style=\"font-weight: 400\"> — Matt Burgess, \u003c/span>\u003ci>\u003cspan style=\"font-weight: 400\">WIRED\u003c/span>\u003c/i>\u003c/li>\n\u003cli>\u003ca href=\"https://19thnews.org/2025/05/take-it-down-act-signing-explicit-images\">\u003cspan style=\"font-weight: 400\">Take It Down Act: How to use it to remove revenge porn\u003c/span>\u003c/a>\u003cspan style=\"font-weight: 400\"> — Jasmine Mithani, \u003c/span>\u003ci>\u003cspan style=\"font-weight: 400\">The 19th\u003c/span>\u003c/i>\u003c/li>\n\u003cli>\u003ca href=\"https://rainn.org/rainns-recommendations-for-legislators/image-based-sexual-abuse-laws-combat-nonconsensual-ai-deepfakes/\">\u003cspan style=\"font-weight: 400\">Image-Based Sexual Abuse Laws: Combat Nonconsensual AI Deepfakes\u003c/span>\u003c/a>\u003cspan style=\"font-weight: 400\"> — \u003c/span>\u003ci>\u003cspan style=\"font-weight: 400\">RAINN\u003c/span>\u003c/i>\u003c/li>\n\u003cli>\u003ca href=\"https://rainn.org/get-informed/issues/ai-tech-enabled-sexual-abuse/\">\u003cspan style=\"font-weight: 400\">AI & Tech-Enabled Sexual Abuse: Risk & Prevention\u003c/span>\u003c/a>\u003cspan style=\"font-weight: 400\"> — \u003c/span>\u003ci>\u003cspan style=\"font-weight: 400\">RAINN\u003c/span>\u003c/i>\u003cbr>\n\u003ca href=\"https://deepstrike.io/blog/deepfake-statistics-2025\">\u003cspan style=\"font-weight: 400\">Deepfake Statistics 2025: AI Fraud Data & Trends\u003c/span>\u003c/a>\u003cspan style=\"font-weight: 400\"> — Mohammed Khalil, \u003c/span>\u003ci>\u003cspan style=\"font-weight: 400\">DeepStrike\u003c/span>\u003c/i>\u003c/li>\n\u003c/ul>\n\u003cp>Want to give us feedback on the show? 
Shoot us an email at \u003ca href=\"mailto:CloseAllTabs@KQED.org\">CloseAllTabs@KQED.org\u003c/a>\u003c/p>\n\u003cp>\u003cspan style=\"font-weight: 400\">Follow us on\u003c/span>\u003ca href=\"https://www.instagram.com/closealltabspod/\"> \u003cspan style=\"font-weight: 400\">Instagram\u003c/span>\u003c/a>\u003cspan style=\"font-weight: 400\"> and\u003c/span>\u003ca href=\"https://www.tiktok.com/@closealltabs\"> \u003cspan style=\"font-weight: 400\">TikTok\u003c/span>\u003c/a>\u003c/p>\n\u003cp>\u003c/p>\u003c/div>",
"attributes": {
"named": {},
"numeric": []
}
},
{
"type": "component",
"content": "",
"name": "ad",
"attributes": {
"named": {
"label": "fullwidth"
},
"numeric": [
"fullwidth"
]
}
},
{
"type": "contentString",
"content": "\u003cdiv class=\"post-body\">\u003c/div>",
"attributes": {
"named": {},
"numeric": []
}
},
{
"type": "contentString",
"content": "\u003cdiv class=\"post-content post-body\">\u003ch2 id=\"episode-transcript\">Episode Transcript\u003c/h2>\n\u003cp>\u003ci>\u003cspan style=\"font-weight: 400\">This is a computer-generated transcript. While our team has reviewed it, there may be errors.\u003c/span>\u003c/i>\u003c/p>\n\u003cp>\u003cb>Morgan Sung: \u003c/b>\u003cspan style=\"font-weight: 400\">Are you closing your tabs? You can be honest, this is a safe space. If you’re a fan of Close All Tabs and you want more of it, then please rate and review us on Spotify, Apple Podcasts, or wherever you listen to the show. And tell your friends about us. It would be such a huge help to get the word out.\u003c/span>\u003c/p>\n\u003cp>\u003cspan style=\"font-weight: 400\">Okay, let’s get to the show. Just a note, this episode contains mentions of gender-based violence and non-consensual intimate imagery, which may be triggering for some listeners.\u003c/span>\u003c/p>\n\u003cp>\u003cspan style=\"font-weight: 400\">So, you know Grok? It’s the AI chatbot integrated with X, the social media app formerly known as Twitter and now owned by Elon Musk. Well, since late last year, Grok has been embroiled in an undressing scandal, generating sexually explicit images of people without their consent. The majority of targets were women. Some were minors, young girls. For a few weeks, it was a pretty disgusting widespread trend. When women or even teenage girls posted fully clothed photos of themselves on X, other users would comment and tag Grok, asking it to ‘put her in a bikini’ or ‘take off her top.’ The chatbot would publicly respond with a generated lewd or completely naked image of the subject. 
Some users went even further, asking Grok to add blood and bruises, prompting the chatbot to generate graphic, sexually violent images of these women.\u003c/span>\u003c/p>\n\u003cp>\u003cb>Morgpie: \u003c/b>\u003cspan style=\"font-weight: 400\">Oh man, it was very much like I was waking up every day and I didn’t want to post.\u003c/span>\u003c/p>\n\u003cp>\u003cb>Morgan Sung: \u003c/b>\u003cspan style=\"font-weight: 400\">This is Morgpie, a Twitch streamer and OnlyFans creator. People who know her IRL call her Morgan. She’s been a porn actress for years, and as someone who makes sexually explicit content, she’s used to creeps harassing her with her own nudes. But the Grok and dressing trend really unsettled her. It was the worst in January.\u003c/span>\u003c/p>\n\u003cp>\u003cb>Morgpie: \u003c/b>\u003cspan style=\"font-weight: 400\">Being looped in with something that is so violating, and like you said, something that’s even affecting minors is just disgusting. Every day I was going into my comments and just like hiding replies and blocking because I’m like, I’m not going to let you guys just generate these images of me that I did not consent to, especially if it’s being associated with basically creating child pornography on Twitter.\u003c/span>\u003c/p>\n\u003cp>\u003cb>Morgan Sung: \u003c/b>\u003cspan style=\"font-weight: 400\">This was non-consensual, intimate imagery, more commonly known as deep fake porn. A deep fake is content that has been generated or manipulated by AI to imitate someone else. Zander Small, another content creator and a friend of Morgan’s, says that the proliferation of AI tools has started to seriously affect content creators, regardless of whether or not they make adult content.\u003c/span>\u003c/p>\n\u003cp>\u003cb>Zander Small: \u003c/b>\u003cspan style=\"font-weight: 400\">Deep fakes can be anything from deep fake explicit imagery with like, a creator doing something or nude content that they didn’t consent to. 
Or it could be stuff as simple as like, an audio deep fake where a creator is saying something that they don’t consent too, which might have repercussions of them being canceled or stuff that they just obviously wouldn’t consent to saying.\u003c/span>\u003c/p>\n\u003cp>\u003cb>Morgan Sung: \u003c/b>\u003cspan style=\"font-weight: 400\">Morgan hasn’t had to deal with deep fake porn of herself as much. After years of being in this industry, she’s developed thick skin. She’s mostly dealt with leaks, or explicit content that she posted behind a paywall that was illegally downloaded and posted elsewhere, without her consent. But the Grok trend is just the tip of the iceberg. Non-consensual deep fake-porn has exploded over the last few years.\u003c/span>\u003c/p>\n\u003cp>\u003cb>Morgpie: \u003c/b>\u003cspan style=\"font-weight: 400\">I think that for a lot of people, the lack of consent is very attractive.\u003c/span>\u003c/p>\n\u003cp>\u003cb>Morgan Sung: \u003c/b>\u003cspan style=\"font-weight: 400\">This is an issue that overwhelmingly affects women, and these circles are not as fringe as you might think. An annual report last year by the cybersecurity firm DeepStrike found that roughly 97% of all deepfakes online fall under non-consensual intimate imagery, and that 99 to 100% of victims of deepfake pornography are women. Here’s Zander again.\u003c/span>\u003c/p>\n\u003cp>\u003cb>Zander Small: \u003c/b>\u003cspan style=\"font-weight: 400\">I think it is either fans, if you want to call them that, or just creeps on the internet, wanting to see more out of a creator than they consented to. I know it affects a lot of SFW creators.\u003c/span>\u003c/p>\n\u003cp>\u003cb>Morgan Sung: \u003c/b>\u003cspan style=\"font-weight: 400\">SFW, or Safe for Work. They don’t show nudity or make sexually explicit content. 
While NSFW, not Safer work, means adult content.\u003c/span>\u003c/p>\n\u003cp>\u003cb>Zander Small: \u003c/b>\u003cspan style=\"font-weight: 400\">Uh, you know, and I guess from that, you know, if a creator isn’t consenting to do more explicit content, then, you know, these, uh, I guess perpetrators, creeps, whatever you want to call them, you know, take into their hands to do it themselves. And it’s incredibly easy to deep fake content and, you know, as models get better and better and they get quicker and quicker, it doesn’t really require as much of sophisticated technology to run these models.\u003c/span>\u003c/p>\n\u003cp>\u003cb>Morgan Sung: \u003c/b>\u003cspan style=\"font-weight: 400\">Some of the mainstream models, ChatGPT, Gemini, Claude, have guardrails that are supposed to prevent them from generating deep fake porn. In January, X announced that it implemented technological measures to prevent Grok from modifying images of real people in revealing clothing. But there are ways to get around these guardraills. Just last month, NBC News reported that Grok is still generating deep-fake porn of real women. And like Zander said, there are so many other models out there that just don’t have these guardrails in the first place.\u003c/span>\u003c/p>\n\u003cp>\u003cspan style=\"font-weight: 400\">Zander hasn’t had to deal with deep fake porn of himself, but he’s seen how much it’s affected people he’s close to, other safer work creators who don’t make explicit content. And Morgan, coming from the porn industry, has seen how this issue affects her fellow adult content creators.\u003c/span>\u003c/p>\n\u003cp>\u003cspan style=\"font-weight: 400\">So late last year, they teamed up to come up with a solution for other creators. Today, we’re diving into the seedy reality of non-consensual deepfake porn, when it got so bad, why it’s so hard to stop, and how two Gen Z content creators are trying to tackle it. 
Ready?\u003c/span>\u003c/p>\n\u003cp>\u003cspan style=\"font-weight: 400\">This is Close All Tabs. I’m Morgan Sung, tech journalist and your chronically online friend, here to open as many browser tabs as it takes to help you understand how the digital world affects our real lives. Let’s get into it.\u003c/span>\u003c/p>\n\u003cp>\u003cspan style=\"font-weight: 400\">Let’s open our first tab: the reality of non-consensual deep fake porn. Morgan is an award-winning porn creator. Literally, she has multiple Pornhub awards. And when she started years ago, the internet was very different.\u003c/span>\u003c/p>\n\u003cp>\u003cb>Morgpie: \u003c/b>\u003cspan style=\"font-weight: 400\">It’s very interesting because when I first started, the climate was very much like, if you opened up Twitter, you would see tweets that are like, ‘sex work is real work.’ Of course, this was kind of around the time when OnlyFans was only just emerging barely.\u003c/span>\u003c/p>\n\u003cp>\u003cb>Morgan Sung: \u003c/b>\u003cspan style=\"font-weight: 400\">In the world of adult content, there was before OnlyFans, and then there’s after OnlyFans. The platform completely changed the game, lowering the barrier of entry for new creators and giving them new options to monetize their content. Morgan said that before OnlyFans blew up, the only way to make a living as an independent porn creator was to land on the front page of Pornhub, or actresses had to break into the industry by being part of studio productions where they didn’t have as much autonomy.\u003c/span>\u003c/p>\n\u003cp>\u003cb>Morgpie: \u003c/b>\u003cspan style=\"font-weight: 400\">It’s very interesting the shift between whenever porn was basically widely available, you didn’t really have to pay much for it. When I first started, I was uploading to Pornhub, and that was full length, full scenes that you could see for free at any time. Whereas now, the climate has shifted a lot to where creators like myself have a lot more control. 
So we’re able to, you know, use OnlyFans as a platform where we are more connected with our audience and that is actually the main pull. Now we’re in this age where these models can kind of take a bit of that control back. They can control what content they make, how much they sell it for. And I think that that plays so much into like the conversation about deepfakes where it’s about control. It’s all about consent. And then with deepfakes, you can make anybody do anything. So you have the control over this other person.\u003c/span>\u003c/p>\n\u003cp>\u003cb>Morgan Sung: \u003c/b>\u003cspan style=\"font-weight: 400\">Since joining OnlyFans, Morgan and other adult creators have dealt with the same problem: leaks. They consent to paying subscribers accessing certain premium content that’s been posted behind a paywall. Then some unscrupulous subscriber downloads it and posts it publicly without their permission for the rest of the world to see. It was a constant source of frustration for Morgan.\u003c/span>\u003c/p>\n\u003cp>\u003cspan style=\"font-weight: 400\">And then about a year and a half ago, Morgan noticed the deep fakes. Her friends told her about how they stumbled across videos of themselves online, but it wasn’t really them. Someone had taken explicit content from behind their paywalls and modified it, morphing them into these scenarios that the creators never wanted to be in.\u003c/span>\u003c/p>\n\u003cp>\u003cb>Morgpie: \u003c/b>\u003cspan style=\"font-weight: 400\">Again, it all stems back to control. It’s like, ‘oh, you did this thing that I didn’t like. Well, look at this control I have over your image. I’m going to use that against you.’.\u003c/span>\u003c/p>\n\u003cp>\u003cb>Morgan Sung: \u003c/b>\u003cspan style=\"font-weight: 400\">I think some detractors would say, like, ‘oh, well, if you make explicit content, why does deep fake porn bother you? 
Or why do your leaks bother you?’ What would you say to them?\u003c/span>\u003c/p>\n\u003cp>\u003cb>Morgpie: \u003c/b>\u003cspan style=\"font-weight: 400\">I mean, it’s it’s all about consent. That’s like saying, ‘oh, because you make porn, if I see you on the street, I can sexually assault you.’ You know, it’s like, consent is a very real thing. And there’s a big difference between me in the comfort of my own home within my own boundaries, producing content that I enjoy, and somebody else taking these things and making content that I didn’t consent to be in.\u003c/span>\u003c/p>\n\u003cp>\u003cb>Morgan Sung: \u003c/b>\u003cspan style=\"font-weight: 400\">It’s not just Morgan and her fellow porn actresses dealing with this. Women who don’t make explicit content are also subjected to this harassment. One of the most well-known cases of this was when Atrioc, a Twitch streamer, was live. During his stream, he showed his open tabs for a split second, and one of them included deep fake porn of his own friends and streaming colleagues. He was caught buying this content. QTCinderella, another streamer was one of Atriok’s close friends. She was also a victim of the deep fake porn he bought.\u003c/span>\u003c/p>\n\u003cp>\u003ci>\u003cspan style=\"font-weight: 400\">[Audio Clip of QTCinderella]\u003c/span>\u003c/i>\u003cb>\u003cbr>\n\u003c/b>\u003ci>\u003cspan style=\"font-weight: 400\">Atrioc for showing it to thousands of people, the people DMing me pictures of myself from that website, f*ck you all!\u003c/span>\u003c/i>\u003c/p>\n\u003cp>\u003cb>Morgpie: \u003c/b>\u003cspan style=\"font-weight: 400\">Pokimane is like a great example of this.\u003c/span>\u003c/p>\n\u003cp>\u003cb>Morgan Sung: \u003c/b>\u003cspan style=\"font-weight: 400\">Pokimane is another Twitch streamer who was also a victim of atriox deep fake porn purchases. She does not make explicit adult content, but as a woman existing online, she deals with harassment constantly. 
Like, here she is reading comments from her audience.\u003c/span>\u003c/p>\n\u003cp>\u003ci>\u003cspan style=\"font-weight: 400\">[Audio clip of Pokimane]\u003c/span>\u003c/i>\u003c/p>\n\u003cp>\u003ci>\u003cspan style=\"font-weight: 400\">Yo yo yo, let’s see some ass. This ain’t a club fam, this is just my Twitch chat.\u003c/span>\u003c/i>\u003c/p>\n\u003cp>\u003cb>Morgpie: \u003c/b>\u003cspan style=\"font-weight: 400\">There are so many clips of her literally just getting up and standing up out of her chair and that’ll get clipped and posted all over Twitter. And all of Twitter is like, ‘look at what she’s doing. She’s gooner baiting!’.\u003c/span>\u003c/p>\n\u003cp>\u003cb>Morgan Sung: \u003c/b>\u003cspan style=\"font-weight: 400\">Goonerbait started as a term to describe video games or anime that aren’t pornographic but contain a lot of sexual imagery like jiggle physics and very scantily clad female characters. It’s media designed to appeal to gooners. Gooners are porn addicts. And now, internet randos love to accuse real-life women of gooner baiting, mainly female streamers like Pokimane.\u003c/span>\u003c/p>\n\u003cp>\u003cb>Morgpie: \u003c/b>\u003cspan style=\"font-weight: 400\">‘She’s, you know, performing for her male audience.’ And it’s like, well, is she really doing anything? She kind of just got up and walked out of the room, but they’re like, ‘oh, her pants are a little too tight.’ So it’s, like, I think this idea of a woman that’s kind of, just not really even doing anything, a lot of people love to just over-sexualize.\u003c/span>\u003c/p>\n\u003cp>\u003cb>Morgan Sung: \u003c/b>\u003cspan style=\"font-weight: 400\">In some online circles, there is the sentiment that women like Morgan deserve to be deepfaked because they already make porn, and that women, like Pokimane, also deserve to deepfake because they’re somehow gooner baiting. It even affects people who don’t post online. Non-consensual deepfakes are rampant in schools. 
A Wired investigation last month found that high school boys have targeted their fellow classmates by spreading fake, generated nudes of them. These are teenage girls.\u003c/span>\u003c/p>\n\u003cp>\u003cb>Morgpie: \u003c/b>\u003cspan style=\"font-weight: 400\">Yeah, the thing is, it’s not going to stop with sex workers. As much as the sentiment these days is very anti-sex work, like, ‘oh, if you make this content, you’re kind of putting yourself up to be distributed in this way.’ But the thing it is, is it’s 100% a slippery slope and it’s going to keep going into Twitch streamers who are known and even just normal people. There’s nothing stopping anybody from pulling up somebody’s Facebook profile, just a normal person who doesn’t produce any content whatsoever, and making explicit deep fakes of them and distributing them. And that can be used as blackmail. The possibilities there are quite literally endless in terms of the harm that they could cause for everybody.\u003c/span>\u003c/p>\n\u003cp>\u003cb>Morgan Sung: \u003c/b>\u003cspan style=\"font-weight: 400\">You’ve talked about spending so much money on deepfake takedowns, but how did you initially try to tackle this problem of deepfakes and leaked content?\u003c/span>\u003c/p>\n\u003cp>\u003cb>Morgpie: \u003c/b>\u003cspan style=\"font-weight: 400\">I was going in every single week and I was Googling my name and I was going on like Twitter, Reddit, all these other sites, just like searching for my name, um, and seeing pages and pages and pages of all this leaked content that would come up. And back then I was paying over a thousand dollars a month on these takedowns, but I would still have to go in and manually report a lot of stuff. You shouldn’t really have to go in and look at your own leaks and your own deep fakes, which is just awful.\u003c/span>\u003c/p>\n\u003cp>\u003cb>Morgan Sung: \u003c/b>\u003cspan style=\"font-weight: 400\">Morgan was at her wits end. 
And then, late last year, she saw that Zander was working on a project that may be able to solve her problem. And she wanted to help. We’ll hear Zander’s story after the break.\u003c/span>\u003c/p>\n\u003cp>\u003cspan style=\"font-weight: 400\">But first, we wanted to remind you that Close All Tabs depends on listeners like you to keep us going. You can support us by becoming a member at donate.kqed.org slash podcasts. Okay, back to the story after the break. Stick around.\u003c/span>\u003c/p>\n\u003cp>\u003cspan style=\"font-weight: 400\">We’re back. Now, let’s open that new tab: What is Fanlock?\u003c/span>\u003c/p>\n\u003cp>\u003cspan style=\"font-weight: 400\">Zander had started out as a Minecraft YouTuber back in high school. It was a fun thing he did on the side before he went to college to study software engineering. He was on his high school robotics team and loved tinkering and fixing things. A few years ago, during his sophomore year, he started going to anime conventions with his friends. Here’s the thing, Zander’s really tall. He’s 6’8″. His friend pointed out that he could carve out a real niche as a comically tall cosplayer. He pushed Zander to start posting.\u003c/span>\u003c/p>\n\u003cp>\u003cb>Zander Small: \u003c/b>\u003cspan style=\"font-weight: 400\">He was like ‘Bro, it’s gonna be like viral because like, oh my gosh, why is a Gojo cosplayer like as tall as like LeBron James?’ So I did it and it did pretty good. And I guess it just snowballed from there and I just haven’t stopped since.\u003c/span>\u003c/p>\n\u003cp>\u003cb>Morgan Sung: \u003c/b>\u003cspan style=\"font-weight: 400\">About six months after he went viral as comically tall Gojo, he started getting brand deals from anime companies. He gained hundreds of thousands of followers. He flew all over the country, attending cons and meetups. He even hosted a few lookalike competitions. There’s a picture of the Hatsune Miku lookalike competition he hosted. 
A gaggle of cosplayers in turquoise wigs, and then Zander, towering above the crowd in his own turquois getup. Of course, he was still in school juggling a burgeoning full-time career as a content creator while also attending classes and doing homework and studying for exams.\u003c/span>\u003c/p>\n\u003cp>\u003cspan style=\"font-weight: 400\">He considered dropping out, but his parents really, really wanted him to stay in school. They weren’t thrilled at the idea of their son leaving an engineering degree to pursue anime content. So he stuck it out, and last year, while finishing up his last semester of school, He stumbled across this deep fake problem. It struck a very personal chord.\u003c/span>\u003c/p>\n\u003cp>\u003cb>Zander Small: \u003c/b>\u003cspan style=\"font-weight: 400\">So about a month before I graduated, my girlfriend, who’s an SFW creator, had a huge deep fake problem. Um, you know, there’s accounts popping up on like Threads or Instagram that either use her likeness or just full on non-consensual porn, uh, deep fakes of her, which is super mentally taxing, uh on her, you know, as an SF W creator. You know, she didn’t consent to being in those positions or having these account to DM her fans, like, ‘Hey, send me $400 and we’ll go on a date,’ type of just scam content. So it was from there that I was like, let me see what’s up and see if I can help you. So that’s when I really took a deep dive into DMCA, non-consensual imagery and depending on the platform it’s on what you can do about that.\u003c/span>\u003c/p>\n\u003cp>\u003cb>Morgan Sung: \u003c/b>\u003cspan style=\"font-weight: 400\">DMCA, as in the Digital Millennium Copyright Act. It’s copyright law for internet content.\u003c/span>\u003c/p>\n\u003cp>\u003cb>Zander Small: \u003c/b>\u003cspan style=\"font-weight: 400\">And I was able to get a lot of her stuff down, which was great. 
Uh, but then at that point it was like, you know, what are the other players in the space doing about this?\u003c/span>\u003c/p>\n\u003cp>\u003cb>Morgan Sung: \u003c/b>\u003cspan style=\"font-weight: 400\">What he found were takedown tools that were very expensive and not that effective. While creator management firms and talent agencies have in-house services for this, they’re inaccessible to smaller creators. After Zander helped his girlfriend, her friends reached out to him. They had the same problem. And then their friends reached out. And all of this coincided with his post-graduation job search. He planned to at least try to use his degree. But the job market for entry-level software engineers was rough.\u003c/span>\u003c/p>\n\u003cp>\u003cb>Zander Small: \u003c/b>\u003cspan style=\"font-weight: 400\">I think by the third final round interview at like some fang company where they rejected me after four weeks and five interviews, I was just so fed up. I was like, you know what, screw this. I’m gonna just do this myself. I’ma make my own company. So, and at that time, you it’s like the overlap of like, oh, I figured out how to do this. I could help more creators like this and really solve a real problem.\u003c/span>\u003c/p>\n\u003cp>\u003cb>Morgan Sung: \u003c/b>\u003cspan style=\"font-weight: 400\">And so, he started working on it, a tool for creators that would scan the internet for leaked and deepfaked content and automatically send DMCA takedown requests. And if the sites didn’t comply, this tool would have to find other ways to force a takedown.\u003c/span>\u003c/p>\n\u003cp>\u003cspan style=\"font-weight: 400\">Zander knew how traumatic it was for his girlfriend and her friends to be constantly confronted with non-consensual deepfake porn. So, he wanted this tool to take down content automatically, without creators having to see it. And the tool also had to catch the non-consensual deepfakes before they spread to other platforms. 
But he knew he couldn’t do it alone. He needed the perspective of other creators for it to really work.\u003c/span>\u003c/p>\n\u003cp>\u003cb>Zander Small: \u003c/b>\u003cspan style=\"font-weight: 400\">I pretty much just posted on my close friends at some point, like, hey, I’m thinking about doing this as like an actual like business or something like that. If anyone will be down to just test it out for free and see how good like my, you know, scanning architecture and stuff like that is, let me know. And Morgan actually swiped up on the story and was like, hey, that actually sounds pretty neat. I’d be down.\u003c/span>\u003c/p>\n\u003cp>\u003cb>Morgan Sung: \u003c/b>\u003cspan style=\"font-weight: 400\">Morgan and Zander had met at TwitchCon a while back.\u003c/span>\u003c/p>\n\u003cp>\u003cb>Zander Small: \u003c/b>\u003cspan style=\"font-weight: 400\">And we hopped on a call and I was like, ‘would you be down to like do this with me?’ Cause like, I think it’d be pretty sick if we had like two creators doing it that know the problem. You know, Morgan knows firsthand, like the adult space, but as well as like a firsthand account of like leaks and deep fakes and you know, where they live and stuff like that. And you know I guess from there, it just was one of those things where it was like I think this could be a real player in the space and I’m really passionate about it.\u003c/span>\u003c/p>\n\u003cp>\u003cb>Morgan Sung: \u003c/b>\u003cspan style=\"font-weight: 400\">Morgan, what was it like for you to see that story?\u003c/span>\u003c/p>\n\u003cp>\u003cb>Morgpie: \u003c/b>\u003cspan style=\"font-weight: 400\">I’ve struggled with this stuff for so long. I know so many people that I could tap in on and get their feedback. 
My scope in this space is so wide because I’ve had my eggs in so many baskets online And that I knew that I would be able to bring a good perspective and good input.\u003c/span>\u003c/p>\n\u003cp>\u003cb>Morgan Sung: \u003c/b>\u003cspan style=\"font-weight: 400\">So they managed to raise $200,000, and with that, Morgan and Zander launched Fanlock earlier this year. Zander handles the technical side, making sure Fanlock works, and Morgan handles the creator side, managing outreach to other creators.\u003c/span>\u003c/p>\n\u003cp>\u003cb>Zander Small: \u003c/b>\u003cspan style=\"font-weight: 400\">I guess I get to apply that degree that I was considering dropping out to do content for. And it’s, I guess like a full 360, you know, everyone that was like, you should stay in school and finish it out. I guess it came back to be useful because now I can apply it to helping my friends and other people in the space with this really real problem that they have.\u003c/span>\u003c/p>\n\u003cp>\u003cb>Morgan Sung: \u003c/b>\u003cspan style=\"font-weight: 400\">This solution isn’t that straightforward though. That’s a new tab: Why is it so hard to take down deepfakes?\u003c/span>\u003c/p>\n\u003cp>\u003cspan style=\"font-weight: 400\">In May last year, President Trump signed the Take It Down Act, a landmark law that criminalizes the publication of non-consensual intimate imagery, including digital forgeries, aka deep fake porn. It’s one of Congress’s first bipartisan actions to tackle AI-generated content. 
The law also requires online platforms to implement a removal request system and to take down deep fake porn within 48 hours of a request.\u003c/span>\u003c/p>\n\u003cp>\u003cb>Zander Small: \u003c/b>\u003cspan style=\"font-weight: 400\">A lot of these sites thankfully already had like forms or different reporting mechanisms to report deepfakes, but I think with this act itself, it’s a really good step in the right direction to combat non-consensual deepfake and, you know, props to the government for doing something right for once and actually passing this really quickly.\u003c/span>\u003c/p>\n\u003cp>\u003cb>Morgan Sung: \u003c/b>\u003cspan style=\"font-weight: 400\">So the Take It Down Act is only enforceable under U.S. jurisdiction, although the EU also has similar laws. But a lot of these sites are based outside of these places, like in Russia.\u003c/span>\u003c/p>\n\u003cp>\u003cb>Zander Small: \u003c/b>\u003cspan style=\"font-weight: 400\">Yeah, so for like Russia and Chinese sites, it gets a lot harder because they don’t have any need to comply either like deep fake penalties or DMCA because it’s specifically like USA, EU jurisdiction typically. And that makes it a lot hard to get content down off those sites if it’s even possible at all.\u003c/span>\u003c/p>\n\u003cp>\u003cspan style=\"font-weight: 400\">There’s a few things you can do for these sites. There’s been some sites I know firsthand that they use, let’s say, a USA-based company for their notification system. We’re able to submit basically a DMCA to those companies, basically being like, hey, just so you know, you’re aiding in copyright infringement by working with this client. If we were to take it a step further, we could always issue a DMC subpoena to them if they use Google Analytics, for example, to straight to Google. And that would help us get more information about… The actual emails of the site, who this person actually is. So if they’re in the EU or USA, we can take those legal routes. 
Obviously there’s sites I know that are pretty much, they’re built from the ground up for piracy and it’s pretty much impossible to get those stuff down.\u003c/span>\u003c/p>\n\u003cp>\u003cb>Morgan Sung: \u003c/b>\u003cspan style=\"font-weight: 400\">One of the more difficult aspects of tackling deepfakes is catching them before Google indexes them, basically, storing web pages in its own database so they appear in search results. Because when something appears in search results, it spreads like wildfire.\u003c/span>\u003c/p>\n\u003cp>\u003cspan style=\"font-weight: 400\">Google updated its search functions a few years ago to identify deepfakes and prevent them from appearing at the top of search results, but there are still deepfakes that slip through the cracks. Zander said that Fanlock keeps tabs on specific sites that have histories of hosting non-consensual deepfakes. They scan them and send takedown demands, before they hit Google search results.\u003c/span>\u003c/p>\n\u003cp>\u003cb>Zander Small: \u003c/b>\u003cspan style=\"font-weight: 400\">You know, no one wants their family Googling them or something and they see deep fakes of them all over Google Images.\u003c/span>\u003c/p>\n\u003cp>\u003cb>Morgan Sung: \u003c/b>\u003cspan style=\"font-weight: 400\">I know Fanlock also relies on a lot of facial recognition technology to identify leaked content and deepfakes. Obviously, this technology is very controversial. It’s often used in law enforcement and has a lot of connections to surveillance. But what are your thoughts on this use of facial-recognition technology?\u003c/span>\u003c/p>\n\u003cp>\u003cb>Zander Small: \u003c/b>\u003cspan style=\"font-weight: 400\">Yeah, I mean, obviously, if a creator signs up for our platform and we’re doing it in a consensual manner, I think that’s great. 
I obviously am big anti-surveillance, but I think the the key word at the end of the day is just consent, which is like the fundamental problem that I think these creators are having. And if they’re consenting to a service to take down stuff that was made non-consensually, I think, that’s why our creators are okay with it. And I think there’s a big differentiation between that and then, you know, some tech company scanning my face to see if I’m a criminal or something like that.\u003c/span>\u003c/p>\n\u003cp>\u003cb>Morgan Sung: \u003c/b>\u003cspan style=\"font-weight: 400\">I want to talk about some of the technical challenges that still exist. You mentioned trying to build a Telegram scanner right now. A lot of non-consensual deep fake porn is passed around in closed channels on Discord or group chats or Telegram. Do either of you have any experience with this happening? Like, what is the approach here?\u003c/span>\u003c/p>\n\u003cp>\u003cb>Zander Small: \u003c/b>\u003cspan style=\"font-weight: 400\">When we were building FanLock, I was like, Telegram is, like, the final boss of piracy. I really want to build a solution that while we can’t scan a hundred percent of Telegram, I want to build the absolute most, like I guess comprehensive Telegram scanner we can based on like what’s publicly available and what providers there are to us. So for Telegram, typically for like private groups and stuff like that, you’re able to join them if you have like a join link, which we’ve kind of gotten from people being like, ‘hey, I got leaks here, join my channel.’ And after we get the join link we’re able to figure out where copyrighted content is.\u003c/span>\u003c/p>\n\u003cp>\u003cspan style=\"font-weight: 400\">We already do have our Telegram scanner up. You know, we have about 11 million channels, you know, from our own services, but also third party providers that we use that have kind of indexed Telegram for us, which is great. 
Discord is a little bit trickier because it’s a TOS breach to use any sort of like bot activity on that.\u003c/span>\u003c/p>\n\u003cp>\u003cspan style=\"font-weight: 400\"> \u003c/span>\u003cb>Morgan Sung: \u003c/b>\u003cspan style=\"font-weight: 400\">TOS is Terms of Service, the contract between a platform like Telegram and its users.\u003c/span>\u003c/p>\n\u003cp>\u003cb>Zander Small: \u003c/b>\u003cspan style=\"font-weight: 400\">For now, like on Discord, if someone has a link that they’ve noticed that they want down, they can submit it to us and then we can do it from there. We currently don’t scan Discord because it is like a TOS breach to do, but we’re hoping as, like I said, as we grow that door can open.\u003c/span>\u003c/p>\n\u003cp>\u003cb>Morgan Sung: \u003c/b>\u003cspan style=\"font-weight: 400\">Overall, what hurdles still exist when it comes to taking down deepfakes? Like what’s the kind of like technical white whale you’re still chasing?\u003c/span>\u003c/p>\n\u003cp>\u003cb>Zander Small: \u003c/b>\u003cspan style=\"font-weight: 400\">Yeah, I’d say the biggest thing that we’re trying to roll out is actually identifying who leaked or who deep faked XYZ content. I think if we were able to do that, we might, I wouldn’t say solve the piracy problem, but definitely lower it. You know, we’re really hoping we can get in talks with, you know, platforms like OF, Fansly or Instagram and stuff like that, uh, to roll out a technology that we’re working on where basically it embeds like an invisible watermark into different images and stuff like that.\u003c/span>\u003c/p>\n\u003cp>\u003cspan style=\"font-weight: 400\">So if it is leaked or if it has deep faked or if someone else’s face has put on it, they’re able to know who exactly posted it based off this invisible embedded technology, which already exists for sites like Netflix. 
It’s how they track like video, uh, I guess leaks or, you know, from studios that maybe have like a trailer for the new Avengers movie and they want to track if it got leaked on X or anything like that.\u003c/span>\u003c/p>\n\u003cp>\u003cspan style=\"font-weight: 400\">I think if we’re able to get that done, like I feel like we’d significantly fix the problem and be a lot more proactive. Because I mean, if people start realizing, ‘oh shoot, if I leak or deep fake content, my account gets banned. Like, it’s going to really throw a wrench in the whole leak ecosystem. And that’s what we’re really trying to build towards right now.\u003c/span>\u003c/p>\n\u003cp>\u003cb>Morgan Sung: \u003c/b>\u003cspan style=\"font-weight: 400\">You’re coming from very different sides of the internet, kind of, whether in the safe work side or the adult content industry. But this is also a problem that deeply affects both of your spheres of the creator economy. How has the proliferation of deepfake porn changed the creator industry for you? And what would you say to someone who’s afraid to keep posting?\u003c/span>\u003c/p>\n\u003cp>\u003cb>Morgpie: \u003c/b>\u003cspan style=\"font-weight: 400\">The unfortunate thing is it’s such an uphill battle when it comes to deepfaked and leaked content, especially with AI getting as good as it is right now. But to somebody who is kind of scared to post right now, just know that there are people who are trying to find solutions to this kind of stuff.\u003c/span>\u003c/p>\n\u003cp>\u003cspan style=\"font-weight: 400\">And for these people who are generating this kind of content, it’s very much about their own sense of control. It doesn’t reflect you as a creator. You shouldn’t be afraid to post what you want because of this horrible threat of somebody taking your content and basically twisting it into something that you didn’t consent to. And hopefully our government can kind of catch up with this kind of stuff here pretty soon. 
But there are people like me and Zander who are trying to take real steps to help mitigate this.\u003c/span>\u003c/p>\n\u003cp>\u003cb>Zander Small: \u003c/b>\u003cspan style=\"font-weight: 400\">For creators, I’d say, you know, if you need to, you know, get anything you need for support on it, do it. You know, if you need to take a step back, do it. And then I’d say like, it’s a twofold thing where it’s like, don’t glamorize generative AI video and image content because that only speeds up the industry and then really push for better legislation and, you know, call your Senator, call your Congressman, like get it passed. Because It’s only going to get worse as it gets easier and it’s able to be done for more people. I think those are probably the two biggest things a creator can do right now that has like an actual like tangible impact to halt this problem or make it slow down at least.\u003c/span>\u003c/p>\n\u003cp>\u003cb>Morgan Sung: \u003c/b>\u003cspan style=\"font-weight: 400\">Well, thank you both so much for talking about all of this.\u003c/span>\u003c/p>\n\u003cp>\u003cb>Morgpie: \u003c/b>\u003cspan style=\"font-weight: 400\">Yeah, thank you for having us.\u003c/span>\u003c/p>\n\u003cp>\u003cb>Zander Small: \u003c/b>\u003cspan style=\"font-weight: 400\">Yeah, for sure.\u003c/span>\u003c/p>\n\u003cp>\u003cb>Morgan Sung: \u003c/b>\u003cspan style=\"font-weight: 400\">If you or someone you know has been targeted with deep fake porn, there are ways to have it removed. Fanlock also has free guides for creators navigating this problem. Check the show notes for more. We’ll link to a few resources about the Take It Down Act and how to remove non-consensual intimate imagery. For now, let’s close all these tabs.\u003c/span>\u003c/p>\n\u003cp>\u003cspan style=\"font-weight: 400\">Close All Tabs is a production of KQED Studios and is reported and hosted by me, Morgan Sung. This episode was produced by Chris Egusa, who also composed our theme song and credits music. 
It was edited by Chris Hambrick. The Close All Tabs team also includes producer Maya Cueva and audio engineer, Brendan Willard. Additional music by APM. Audience engagement support from Maha Sanad. Jen Chien is our director of podcasts and Ethan Toven-Lindsey is our editor in chief.\u003c/span>\u003c/p>\n\u003cp>\u003cspan style=\"font-weight: 400\">Some members of the KQED podcast team are represented by the Screen Actors Guild, American Federation of Television and Radio Artists, San Francisco, Northern California local.\u003c/span>\u003c/p>\n\u003cp>\u003cspan style=\"font-weight: 400\">Keyboard sounds were recorded on my purple and pink dust silver K84 wired mechanical keyboard with Gateron red switches. Thanks for listening.\u003c/span>\u003c/p>\n\u003cp>\u003cspan style=\"font-weight: 400\"> \u003c/span>\u003c/p>\n\u003cp> \u003c/p>\n\u003cp>\u003c/p>\n\u003c/div>"
}
],
"link": "/news/12083428/how-an-onlyfans-model-and-a-cosplayer-are-fighting-nonconsensual-deepfake-porn",
"authors": [
"11944",
"11869",
"11832",
"11943"
],
"programs": [
"news_35082"
],
"categories": [
"news_33520"
],
"tags": [
"news_25184",
"news_34755",
"news_22973",
"news_3137",
"news_34646",
"news_2414",
"news_1859",
"news_4837",
"news_1631"
],
"featImg": "news_12083429",
"label": "source_news_12083428"
},
"news_12083278": {
"type": "posts",
"id": "news_12083278",
"meta": {
"index": "posts_1716263798",
"site": "news",
"id": "12083278",
"score": null,
"sort": [
1778629278000
]
},
"guestAuthors": [],
"slug": "sam-altman-defends-himself-from-elon-musks-accusations-in-openai-trial",
"title": "Sam Altman Defends Himself From Elon Musk’s Accusations in OpenAI Trial",
"publishDate": 1778629278,
"format": "standard",
"headTitle": "Sam Altman Defends Himself From Elon Musk’s Accusations in OpenAI Trial | KQED",
"labelTerm": {
"site": "news"
},
"content": "\u003cp>On the stand on Tuesday, OpenAI CEO Sam Altman said that Elon Musk tried to \u003ca href=\"https://www.kqed.org/news/12081916/are-elon-musk-and-openai-fighting-an-ai-arms-race-sam-altmans-lawyers-think-so\">wrest control over the company\u003c/a> they co-founded before the Tesla CEO’s 2018 exit.\u003c/p>\n\u003cp>Altman’s testimony in the federal trial in Oakland, which many see as a billionaire grudge match, pushed back on Musk’s claim that the powerful AI start-up betrayed its mission to benefit the public good. Musk has accused Altman of \u003ca href=\"https://www.kqed.org/news/12081603/elon-musk-takes-aim-at-openai-as-trial-begins-its-not-ok-to-steal-a-charity\">“stealing a charity” \u003c/a>by building an $850 million for-profit company on the back of its nonprofit research lab.\u003c/p>\n\u003cp>Altman said that in early discussions about creating a for-profit arm, Musk sought majority ownership, and later proposed folding the nonprofit into his car company.\u003c/p>\n\u003cp>[ad fullwidth]\u003c/p>\n\u003cp>“I read that as a lightweight threat,” Altman said of the plan to bring OpenAI into Tesla. “I don’t think it would have served the mission. I think it would have effectively destroyed the nonprofit in the process.”\u003c/p>\n\u003cp>“Mr. 
Musk did try to kill it, I guess twice,” he said.\u003c/p>\n\u003cp>As early as summer 2017, Altman, Musk and other OpenAI executives began discussing if and how to launch a for-profit, citing a need to raise more money to keep up with competitors like Google.\u003c/p>\n\u003cfigure id=\"attachment_12083394\" class=\"wp-caption aligncenter\" style=\"max-width: 2000px\">\u003cimg loading=\"lazy\" decoding=\"async\" class=\"wp-image-12083394 size-full\" src=\"https://cdn.kqed.org/wp-content/uploads/sites/10/2026/05/260512-MUSK-ALTMAN-TRIAL-VB-04-KQED.jpg\" alt=\"\" width=\"2000\" height=\"1125\" srcset=\"https://cdn.kqed.org/wp-content/uploads/sites/10/2026/05/260512-MUSK-ALTMAN-TRIAL-VB-04-KQED.jpg 2000w, https://cdn.kqed.org/wp-content/uploads/sites/10/2026/05/260512-MUSK-ALTMAN-TRIAL-VB-04-KQED-160x90.jpg 160w, https://cdn.kqed.org/wp-content/uploads/sites/10/2026/05/260512-MUSK-ALTMAN-TRIAL-VB-04-KQED-1536x864.jpg 1536w, https://cdn.kqed.org/wp-content/uploads/sites/10/2026/05/260512-MUSK-ALTMAN-TRIAL-VB-04-KQED-1200x675.jpg 1200w\" sizes=\"auto, (max-width: 2000px) 100vw, 2000px\">\u003cfigcaption class=\"wp-caption-text\">OpenAI CEO Sam Altman testifies in the trial in which Elon Musk claims that Altman and OpenAI abandoned their founding promise to develop AI for the benefit of humanity rather than solely for profit in Oakland on May 12, 2026. \u003ccite>(Vicki Behringer for KQED)\u003c/cite>\u003c/figcaption>\u003c/figure>\n\u003cp>Altman said they were “running the organization on a shoestring,” with a short runway of cash. To acquire the compute — or the GPUs and CPUs needed to power AI — and funding they needed to pursue artificial general intelligence, or a superintelligent AI technology known as AGI, the company would need more significant investments, the executives determined.\u003c/p>\n\u003cp>“I thought, of course, we needed to raise billions to quickly ramp,” he said. 
“I saw no way to do it.”\u003c/p>\n\u003cp>Altman, Greg Brockman, the president of OpenAI and Ilya Sutskever, a former top OpenAI computer scientist and member of its founding team, have said that in those conversations, Musk repeatedly proposed plans that would give him majority control. Initially, Altman said that he asked for 90% equity in a potential for-profit.\u003c/p>\n\u003cp>The other executives pushed back on this request, including in an email Altman sent to Musk at the time, in which he said, “I am worried about control. I don’t think any one person should have control of the world’s first AGI — in fact, the whole reason we started OpenAI is so that wouldn’t happen.”[aside postID=news_12083224 hero='https://cdn.kqed.org/wp-content/uploads/sites/10/2026/05/SamAltmanGetty.jpg']Altman described Musk as “mercurial,” and said that when he left OpenAI in February 2018, after for-profit discussions fell apart, “people wondered if he’d try to take a vengeance on us” — which both he and his attorney, William Savitt, have alleged is exactly what Musk’s lawsuit aims to do.\u003c/p>\n\u003cp>During his cross-examination, though, Musk’s counsel Steven Molo seemed to suggest that it is Altman who has amassed significant control over OpenAI since it did launch a for-profit arm in 2019.\u003c/p>\n\u003cp>Molo asked Altman about the testimonies of various former OpenAI executives, who said he was untrustworthy and had a history of lying. Altman denied hearing those testimonies, but when asked if he had “repeatedly been called a liar” by people he has done business with, he said, “I have heard people say that.”\u003c/p>\n\u003cp>Molo said that Altman sits on the board of directors for both the OpenAI Foundation, the nonprofit arm, and OpenAI’s for-profit. 
He is also the company’s CEO.\u003c/p>\n\u003cp>“Would you ever fire yourself as the CEO of the for-profit?” Molo said, adding that the board of the nonprofit is supposed to provide oversight for the chief officer.\u003c/p>\n\u003cp>Altman said that CEOs are “almost always” on their company’s boards. When pressed, he said he had “no plans” to fire himself.\u003c/p>\n\u003cfigure id=\"attachment_12083294\" class=\"wp-caption aligncenter\" style=\"max-width: 2000px\">\u003cimg loading=\"lazy\" decoding=\"async\" class=\"size-full wp-image-12083294\" src=\"https://cdn.kqed.org/wp-content/uploads/sites/10/2026/05/260512-MUSK-ALTMAN-TRIAL-VB-01-KQED.jpg\" alt=\"\" width=\"2000\" height=\"1125\" srcset=\"https://cdn.kqed.org/wp-content/uploads/sites/10/2026/05/260512-MUSK-ALTMAN-TRIAL-VB-01-KQED.jpg 2000w, https://cdn.kqed.org/wp-content/uploads/sites/10/2026/05/260512-MUSK-ALTMAN-TRIAL-VB-01-KQED-160x90.jpg 160w, https://cdn.kqed.org/wp-content/uploads/sites/10/2026/05/260512-MUSK-ALTMAN-TRIAL-VB-01-KQED-1536x864.jpg 1536w, https://cdn.kqed.org/wp-content/uploads/sites/10/2026/05/260512-MUSK-ALTMAN-TRIAL-VB-01-KQED-1200x675.jpg 1200w\" sizes=\"auto, (max-width: 2000px) 100vw, 2000px\">\u003cfigcaption class=\"wp-caption-text\">Bret Taylor testifies in the trial in which Elon Musk claims that Sam Altman and OpenAI abandoned their founding promise to develop AI for the benefit of humanity rather than solely for profit in Oakland on May 12, 2026. \u003ccite>(Vicki Behringer for KQED)\u003c/cite>\u003c/figcaption>\u003c/figure>\n\u003cp>Molo also asked Altman about how board members were selected following his brief firing in 2023. During the five-day ouster, there were long negotiations behind the scenes about whether Altman would return, and who would be on the board if he did. 
Altman, Brockman and other OpenAI executives who followed them out were also in discussions with Microsoft, OpenAI’s largest financial backer, which had offered to bring them on to start a new AI team.\u003c/p>\n\u003cp>Altman said initially he’d proposed to remove OpenAI’s board, which fired him, and replace it with four members, including himself. Altman was not made a board member at that time, but Molo said that he had proposed the three members who were ultimately selected — Bret Taylor, Larry Summers and Adam D’Angelo — in conversations with Microsoft CEO Satya Nadella.\u003c/p>\n\u003cp>Altman said that he had no power to appoint new board members, but that he did say which configurations he would be “willing” to be rehired into.\u003c/p>\n\u003cp>Earlier in the day, he characterized his return to OpenAI as running “back into a burning building to try to save it.”\u003c/p>\n\u003cp>Later this week, both Altman and Musk’s legal teams will present their closing arguments. Then the jury and judge will decide which tech leader to believe.\u003c/p>\n\u003cp>\u003c/p>\n",
"blocks": [],
"excerpt": "During a brief cross-examination of Altman, the Tesla CEO’s attorney questioned whether or not Altman was trustworthy.",
"status": "publish",
"parent": 0,
"modified": 1778630872,
"stats": {
"hasAudio": false,
"hasVideo": false,
"hasChartOrMap": false,
"iframeSrcs": [],
"hasGoogleForm": false,
"hasGallery": false,
"hasHearkenModule": false,
"hasPolis": false,
"paragraphCount": 22,
"wordCount": 990
},
"headData": {
"title": "Sam Altman Defends Himself From Elon Musk’s Accusations in OpenAI Trial | KQED",
"description": "During a brief cross-examination of Altman, the Tesla CEO’s attorney questioned whether or not Altman was trustworthy.",
"ogTitle": "",
"ogDescription": "",
"ogImgId": "",
"twTitle": "",
"twDescription": "",
"twImgId": "",
"schema": {
"@context": "https://schema.org",
"@type": "NewsArticle",
"headline": "Sam Altman Defends Himself From Elon Musk’s Accusations in OpenAI Trial",
"datePublished": "2026-05-12T16:41:18-07:00",
"dateModified": "2026-05-12T17:07:52-07:00",
"image": "https://cdn.kqed.org/wp-content/uploads/2020/02/KQED-OG-Image@1x.png",
"isAccessibleForFree": "True",
"publisher": {
"@type": "NewsMediaOrganization",
"@id": "https://www.kqed.org/#organization",
"name": "KQED",
"logo": "https://cdn.kqed.org/wp-content/uploads/2020/02/KQED-OG-Image@1x.png",
"url": "https://www.kqed.org",
"sameAs": [
"https://www.facebook.com/KQED",
"https://twitter.com/KQED",
"https://www.instagram.com/kqed/",
"https://www.tiktok.com/@kqedofficial",
"https://www.linkedin.com/company/kqed",
"https://www.youtube.com/channel/UCeC0IOo7i1P_61zVUWbJ4nw"
]
}
}
},
"primaryCategory": {
"termId": 248,
"slug": "technology",
"name": "Technology"
},
"sticky": false,
"nprStoryId": "kqed-12083278",
"templateType": "standard",
"featuredImageType": "standard",
"excludeFromSiteSearch": "Include",
"articleAge": "0",
"path": "/news/12083278/sam-altman-defends-himself-from-elon-musks-accusations-in-openai-trial",
"audioTrackLength": null,
"parsedContent": [
{
"type": "contentString",
"content": "\u003cdiv class=\"post-body\">\u003cp>\u003cp>On the stand on Tuesday, OpenAI CEO Sam Altman said that Elon Musk tried to \u003ca href=\"https://www.kqed.org/news/12081916/are-elon-musk-and-openai-fighting-an-ai-arms-race-sam-altmans-lawyers-think-so\">wrest control over the company\u003c/a> they co-founded before the Tesla CEO’s 2018 exit.\u003c/p>\n\u003cp>Altman’s testimony in the federal trial in Oakland, which many see as a billionaire grudge match, pushed back on Musk’s claim that the powerful AI start-up betrayed its mission to benefit the public good. Musk has accused Altman of \u003ca href=\"https://www.kqed.org/news/12081603/elon-musk-takes-aim-at-openai-as-trial-begins-its-not-ok-to-steal-a-charity\">“stealing a charity” \u003c/a>by building an $850 million for-profit company on the back of its nonprofit research lab.\u003c/p>\n\u003cp>Altman said that in early discussions about creating a for-profit arm, Musk sought majority ownership, and later proposed folding the nonprofit into his car company.\u003c/p>\n\u003cp>\u003c/p>\u003c/div>",
"attributes": {
"named": {},
"numeric": []
}
},
{
"type": "component",
"content": "",
"name": "ad",
"attributes": {
"named": {
"label": "fullwidth"
},
"numeric": [
"fullwidth"
]
}
},
{
"type": "contentString",
"content": "\u003cdiv class=\"post-body\">\u003cp>\u003c/p>\n\u003cp>“I read that as a lightweight threat,” Altman said of the plan to bring OpenAI into Tesla. “I don’t think it would have served the mission. I think it would have effectively destroyed the nonprofit in the process.”\u003c/p>\n\u003cp>“Mr. Musk did try to kill it, I guess twice,” he said.\u003c/p>\n\u003cp>As early as summer 2017, Altman, Musk and other OpenAI executives began discussing if and how to launch a for-profit, citing a need to raise more money to keep up with competitors like Google.\u003c/p>\n\u003cfigure id=\"attachment_12083394\" class=\"wp-caption aligncenter\" style=\"max-width: 2000px\">\u003cimg loading=\"lazy\" decoding=\"async\" class=\"wp-image-12083394 size-full\" src=\"https://cdn.kqed.org/wp-content/uploads/sites/10/2026/05/260512-MUSK-ALTMAN-TRIAL-VB-04-KQED.jpg\" alt=\"\" width=\"2000\" height=\"1125\" srcset=\"https://cdn.kqed.org/wp-content/uploads/sites/10/2026/05/260512-MUSK-ALTMAN-TRIAL-VB-04-KQED.jpg 2000w, https://cdn.kqed.org/wp-content/uploads/sites/10/2026/05/260512-MUSK-ALTMAN-TRIAL-VB-04-KQED-160x90.jpg 160w, https://cdn.kqed.org/wp-content/uploads/sites/10/2026/05/260512-MUSK-ALTMAN-TRIAL-VB-04-KQED-1536x864.jpg 1536w, https://cdn.kqed.org/wp-content/uploads/sites/10/2026/05/260512-MUSK-ALTMAN-TRIAL-VB-04-KQED-1200x675.jpg 1200w\" sizes=\"auto, (max-width: 2000px) 100vw, 2000px\">\u003cfigcaption class=\"wp-caption-text\">OpenAI CEO Sam Altman testifies in the trial in which Elon Musk claims that Altman and OpenAI abandoned their founding promise to develop AI for the benefit of humanity rather than solely for profit in Oakland on May 12, 2026. \u003ccite>(Vicki Behringer for KQED)\u003c/cite>\u003c/figcaption>\u003c/figure>\n\u003cp>Altman said they were “running the organization on a shoestring,” with a short runway of cash. 
To acquire the compute — or the GPUs and CPUs needed to power AI — and funding they needed to pursue artificial general intelligence, or a superintelligent AI technology known as AGI, the company would need more significant investments, the executives determined.\u003c/p>\n\u003cp>“I thought, of course, we needed to raise billions to quickly ramp,” he said. “I saw no way to do it.”\u003c/p>\n\u003cp>Altman, Greg Brockman, the president of OpenAI and Ilya Sutskever, a former top OpenAI computer scientist and member of its founding team, have said that in those conversations, Musk repeatedly proposed plans that would give him majority control. Initially, Altman said that he asked for 90% equity in a potential for-profit.\u003c/p>\n\u003cp>The other executives pushed back on this request, including in an email Altman sent to Musk at the time, in which he said, “I am worried about control. I don’t think any one person should have control of the world’s first AGI — in fact, the whole reason we started OpenAI is so that wouldn’t happen.”\u003c/p>\u003c/div>",
"attributes": {
"named": {},
"numeric": []
}
},
{
"type": "component",
"content": "",
"name": "aside",
"attributes": {
"named": {
"postid": "news_12083224",
"hero": "https://cdn.kqed.org/wp-content/uploads/sites/10/2026/05/SamAltmanGetty.jpg",
"label": ""
},
"numeric": []
}
},
{
"type": "contentString",
"content": "\u003cdiv class=\"post-body\">\u003cp>Altman described Musk as “mercurial,” and said that when he left OpenAI in February 2018, after for-profit discussions fell apart, “people wondered if he’d try to take a vengeance on us” — which both he and his attorney, William Savitt, have alleged is exactly what Musk’s lawsuit aims to do.\u003c/p>\n\u003cp>During his cross-examination, though, Musk’s counsel Steven Molo seemed to suggest that it is Altman who has amassed significant control over OpenAI since it did launch a for-profit arm in 2019.\u003c/p>\n\u003cp>Molo asked Altman about the testimonies of various former OpenAI executives, who said he was untrustworthy and had a history of lying. Altman denied hearing those testimonies, but when asked if he had “repeatedly been called a liar” by people he has done business with, he said, “I have heard people say that.”\u003c/p>\n\u003cp>Molo said that Altman sits on the board of directors for both the OpenAI Foundation, the nonprofit arm, and OpenAI’s for-profit. He is also the company’s CEO.\u003c/p>\n\u003cp>“Would you ever fire yourself as the CEO of the for-profit?” Molo said, adding that the board of the nonprofit is supposed to provide oversight for the chief officer.\u003c/p>\n\u003cp>Altman said that CEOs are “almost always” on their company’s boards. 
When pressed, he said he had “no plans” to fire himself.\u003c/p>\n\u003cfigure id=\"attachment_12083294\" class=\"wp-caption aligncenter\" style=\"max-width: 2000px\">\u003cimg loading=\"lazy\" decoding=\"async\" class=\"size-full wp-image-12083294\" src=\"https://cdn.kqed.org/wp-content/uploads/sites/10/2026/05/260512-MUSK-ALTMAN-TRIAL-VB-01-KQED.jpg\" alt=\"\" width=\"2000\" height=\"1125\" srcset=\"https://cdn.kqed.org/wp-content/uploads/sites/10/2026/05/260512-MUSK-ALTMAN-TRIAL-VB-01-KQED.jpg 2000w, https://cdn.kqed.org/wp-content/uploads/sites/10/2026/05/260512-MUSK-ALTMAN-TRIAL-VB-01-KQED-160x90.jpg 160w, https://cdn.kqed.org/wp-content/uploads/sites/10/2026/05/260512-MUSK-ALTMAN-TRIAL-VB-01-KQED-1536x864.jpg 1536w, https://cdn.kqed.org/wp-content/uploads/sites/10/2026/05/260512-MUSK-ALTMAN-TRIAL-VB-01-KQED-1200x675.jpg 1200w\" sizes=\"auto, (max-width: 2000px) 100vw, 2000px\">\u003cfigcaption class=\"wp-caption-text\">Bret Taylor testifies in the trial in which Elon Musk claims that Sam Altman and OpenAI abandoned their founding promise to develop AI for the benefit of humanity rather than solely for profit in Oakland on May 12, 2026. \u003ccite>(Vicki Behringer for KQED)\u003c/cite>\u003c/figcaption>\u003c/figure>\n\u003cp>Molo also asked Altman about how board members were selected following his brief firing in 2023. During the five-day ouster, there were long negotiations behind the scenes about whether Altman would return, and who would be on the board if he did. Altman, Brockman and other OpenAI executives who followed them out were also in discussions with Microsoft, OpenAI’s largest financial backer, which had offered to bring them on to start a new AI team.\u003c/p>\n\u003cp>Altman said initially he’d proposed to remove OpenAI’s board, which fired him, and replace it with four members, including himself. 
Altman was not made a board member at that time, but Molo said that he had proposed the three members who were ultimately selected — Bret Taylor, Larry Summers and Adam D’Angelo — in conversations with Microsoft CEO Satya Nadella.\u003c/p>\n\u003cp>Altman said that he had no power to appoint new board members, but that he did say which configurations he would be “willing” to be rehired into.\u003c/p>\n\u003cp>Earlier in the day, he characterized his return to OpenAI as running “back into a burning building to try to save it.”\u003c/p>\n\u003cp>Later this week, both Altman and Musk’s legal teams will present their closing arguments. Then the jury and judge will decide which tech leader to believe.\u003c/p>\n\u003cp>\u003c/p>\n\u003c/div>\u003c/p>",
"attributes": {
"named": {},
"numeric": []
}
}
],
"link": "/news/12083278/sam-altman-defends-himself-from-elon-musks-accusations-in-openai-trial",
"authors": [
"11913",
"251"
],
"categories": [
"news_6188",
"news_28250",
"news_8",
"news_248"
],
"tags": [
"news_34755",
"news_1386",
"news_32668",
"news_3897",
"news_27626",
"news_21891",
"news_34054",
"news_33542",
"news_33543",
"news_34586",
"news_1631"
],
"featImg": "news_12083392",
"label": "news"
},
"news_12083224": {
"type": "posts",
"id": "news_12083224",
"meta": {
"index": "posts_1716263798",
"site": "news",
"id": "12083224",
"score": null,
"sort": [
1778546112000
]
},
"guestAuthors": [],
"slug": "former-openai-exec-calls-decision-to-remove-sam-altman-a-hail-mary-during-musk-trial",
"title": "Former OpenAI Exec Calls Decision to Remove Sam Altman a ‘Hail Mary’ During Musk Trial",
"publishDate": 1778546112,
"format": "standard",
"headTitle": "Former OpenAI Exec Calls Decision to Remove Sam Altman a ‘Hail Mary’ During Musk Trial | KQED",
"labelTerm": {
"site": "news"
},
"content": "\u003cp>Microsoft’s CEO and another major player took the stand on Monday in \u003ca href=\"https://www.kqed.org/news/tag/oakland\">Oakland\u003c/a>, testifying in the blockbuster trial between OpenAI co-founders Elon Musk and Sam Altman.\u003c/p>\n\u003cp>Ahead of Altman’s testimony, Musk’s attorney Steven Molo questioned Microsoft CEO Satya Nadella and Ilya Sutskever, a top OpenAI computer scientist who departed the company in 2024. Sutskever discussed his role in orchestrating Altman’s brief ouster in 2023.\u003c/p>\n\u003cp>Over five days in November 2023, Altman was removed and reinstated from his post, after a coalition of board members raised concerns that he had not been “consistently candid in his communications” and cited a breakdown of trust.\u003c/p>\n\u003cp>[ad fullwidth]\u003c/p>\n\u003cp>Whether Altman and other executives have maintained OpenAI’s initial stated mission — to develop AI safely and for the “benefit of humanity” — is critical to Musk’s suit, which claims that leaders breached their duty to its nonprofit mission by building a for-profit company on top of it. 
Musk also alleged that the company unfairly benefited at his expense.\u003c/p>\n\u003cp>Musk also alleges that Microsoft, which is OpenAI’s largest financial backer and until this week held the exclusive rights to license and sell its technology, aided and abetted that breach of trust.\u003c/p>\n\u003cp>Molo questioned Nadella about Microsoft’s motive to invest in OpenAI — a $13 billion input that Nadella said is expected to see a return of about $92 billion, “if it works out.”\u003c/p>\n\u003cfigure id=\"attachment_12081686\" class=\"wp-caption aligncenter\" style=\"max-width: 2000px\">\u003cimg loading=\"lazy\" decoding=\"async\" class=\"wp-image-12081686 size-full\" src=\"https://cdn.kqed.org/wp-content/uploads/sites/10/2026/04/260428-MUSK-ALTMAN-VB-04-KQED.jpg\" alt=\"\" width=\"2000\" height=\"1125\" srcset=\"https://cdn.kqed.org/wp-content/uploads/sites/10/2026/04/260428-MUSK-ALTMAN-VB-04-KQED.jpg 2000w, https://cdn.kqed.org/wp-content/uploads/sites/10/2026/04/260428-MUSK-ALTMAN-VB-04-KQED-160x90.jpg 160w, https://cdn.kqed.org/wp-content/uploads/sites/10/2026/04/260428-MUSK-ALTMAN-VB-04-KQED-1536x864.jpg 1536w, https://cdn.kqed.org/wp-content/uploads/sites/10/2026/04/260428-MUSK-ALTMAN-VB-04-KQED-1200x675.jpg 1200w\" sizes=\"auto, (max-width: 2000px) 100vw, 2000px\">\u003cfigcaption class=\"wp-caption-text\">Steve Molo, Elon Musk’s attorney, presents opening statements in the trial in which Elon Musk (center-right) claims that Sam Altman (right) and OpenAI abandoned their founding promise to develop AI for the benefit of humanity, rather than solely for profit, in Oakland, on April 28, 2026. 
\u003ccite>(Vicki Behringer for KQED)\u003c/cite>\u003c/figcaption>\u003c/figure>\n\u003cp>Musk’s attorney pointed out Nadella’s fiduciary duty to maximize profit, and referenced a series of texts between him and Altman that appeared to show Nadella pushing for an earlier rollout of the paid version of ChatGPT.\u003c/p>\n\u003cp>“When chatGPT paid?” Nadella wrote in the message.\u003c/p>\n\u003cp>Altman said that there was “Not enough compute to make it a good consumer experience,” to which Nadella said, “The sooner the better.”\u003c/p>\n\u003cp>Nadella said that the reason Microsoft invested was that OpenAI was pursuing a for-profit model, but he said, “If the pie became larger, the nonprofit would benefit as well.”[aside postID=news_12081916 hero='https://cdn.kqed.org/wp-content/uploads/sites/10/2026/04/AP26118555622828-2000x1333.jpg']Molo asked Nadella if he was aware that, for a period of time, OpenAI’s nonprofit did not have any employees.\u003c/p>\n\u003cp>“I am not,” Nadella said.\u003c/p>\n\u003cp>Molo also questioned Nadella about Microsoft’s role during Altman’s brief ouster. 
At the time, Nadella announced that he would hire Altman, along with OpenAI’s third co-founder and current president, Greg Brockman, as well as other allies, to head up a new AI team at Microsoft.\u003c/p>\n\u003cp>Nadella said that he “had ideas about how Sam [Altman] and the other employees could join Microsoft if they were not reinstated.”\u003c/p>\n\u003cp>“If people were going to leave OpenAI, I wanted them to come to Microsoft,” he said.\u003c/p>\n\u003cp>Molo asked Nadella if he knew why Altman had been removed, to which Nadella said he was never given an “explicit answer.”\u003c/p>\n\u003cp>“Did the thought occur to you … the board might issue a public statement about why they fired Altman?” Molo said.\u003c/p>\n\u003cp>Nadella said during that period — referred to as “The Blip” by many OpenAI employees — he was focused on ensuring continuity for customers.\u003c/p>\n\u003cp>“It goes back to me wanting to communicate to customers that they can count on us,” he said. “Come Monday, that doesn’t just disappear.”\u003c/p>\n\u003cfigure id=\"attachment_12082325\" class=\"wp-caption aligncenter\" style=\"max-width: 2000px\">\u003cimg loading=\"lazy\" decoding=\"async\" class=\"wp-image-12082325 size-full\" src=\"https://cdn.kqed.org/wp-content/uploads/sites/10/2026/05/260504-MUSK-ALTMAN-VB-03-KQED.jpg\" alt=\"\" width=\"2000\" height=\"1125\" srcset=\"https://cdn.kqed.org/wp-content/uploads/sites/10/2026/05/260504-MUSK-ALTMAN-VB-03-KQED.jpg 2000w, https://cdn.kqed.org/wp-content/uploads/sites/10/2026/05/260504-MUSK-ALTMAN-VB-03-KQED-160x90.jpg 160w, https://cdn.kqed.org/wp-content/uploads/sites/10/2026/05/260504-MUSK-ALTMAN-VB-03-KQED-1536x864.jpg 1536w, https://cdn.kqed.org/wp-content/uploads/sites/10/2026/05/260504-MUSK-ALTMAN-VB-03-KQED-1200x675.jpg 1200w\" sizes=\"auto, (max-width: 2000px) 100vw, 2000px\">\u003cfigcaption class=\"wp-caption-text\">OpenAI CEO Sam Altman watches as OpenAI President Greg Brockman testifies in the trial in which Elon Musk claims 
that Altman and OpenAI abandoned their founding promise to develop AI for the benefit of humanity, rather than solely for profit, in Oakland, on May 4, 2026. \u003ccite>(Vicki Behringer for KQED)\u003c/cite>\u003c/figcaption>\u003c/figure>\n\u003cp>Sutskever, who took the stand after Nadella, described Altman’s removal differently. He said it was a “Hail Mary” to save OpenAI, which had become an environment that was “not conducive” to the technology’s safety.\u003c/p>\n\u003cp>“I felt a great deal of ownership of OpenAI,” he said. “I felt like I created this company. I simply cared for it, and I didn’t want it to be destroyed.”\u003c/p>\n\u003cp>Sutskever, who helped lead the ouster, had compiled a more than 50-page record of Altman’s “consistent pattern of lying,” including misrepresenting facts, safety protocols and company information to the board and executives.\u003c/p>\n\u003cp>Sutskever maintained that he had worked on a team that aimed to focus on long-term risks as more powerful AI was built.\u003c/p>\n\u003cp>“The goal of the super alignment is to do the research in advance, such that humanity will have the technological means to make it controlled and safe,” he said.\u003c/p>\n\u003cp>The team was disbanded days after he departed the company, in May 2024.\u003c/p>\n\u003cp>\u003c/p>\n",
"blocks": [],
"excerpt": "The testimonies on Monday centered on Sam Altman’s brief 2023 ousting from OpenAI, as allegations mounted against the tech giant’s conduct and Microsoft’s motives in backing the AI company.",
"status": "publish",
"parent": 0,
"modified": 1778547375,
"stats": {
"hasAudio": false,
"hasVideo": false,
"hasChartOrMap": false,
"iframeSrcs": [],
"hasGoogleForm": false,
"hasGallery": false,
"hasHearkenModule": false,
"hasPolis": false,
"paragraphCount": 26,
"wordCount": 890
},
"headData": {
"title": "Former OpenAI Exec Calls Decision to Remove Sam Altman a ‘Hail Mary’ During Musk Trial | KQED",
"description": "The testimonies on Monday centered on Sam Altman’s brief 2023 ousting from OpenAI, as allegations mounted against the tech giant’s conduct and Microsoft’s motives in backing the AI company.",
"ogTitle": "",
"ogDescription": "",
"ogImgId": "",
"twTitle": "",
"twDescription": "",
"twImgId": "",
"schema": {
"@context": "https://schema.org",
"@type": "NewsArticle",
"headline": "Former OpenAI Exec Calls Decision to Remove Sam Altman a ‘Hail Mary’ During Musk Trial",
"datePublished": "2026-05-11T17:35:12-07:00",
"dateModified": "2026-05-11T17:56:15-07:00",
"image": "https://cdn.kqed.org/wp-content/uploads/2020/02/KQED-OG-Image@1x.png",
"isAccessibleForFree": "True",
"publisher": {
"@type": "NewsMediaOrganization",
"@id": "https://www.kqed.org/#organization",
"name": "KQED",
"logo": "https://cdn.kqed.org/wp-content/uploads/2020/02/KQED-OG-Image@1x.png",
"url": "https://www.kqed.org",
"sameAs": [
"https://www.facebook.com/KQED",
"https://twitter.com/KQED",
"https://www.instagram.com/kqed/",
"https://www.tiktok.com/@kqedofficial",
"https://www.linkedin.com/company/kqed",
"https://www.youtube.com/channel/UCeC0IOo7i1P_61zVUWbJ4nw"
]
}
}
},
"primaryCategory": {
"termId": 248,
"slug": "technology",
"name": "Technology"
},
"sticky": false,
"nprStoryId": "kqed-12083224",
"templateType": "standard",
"featuredImageType": "standard",
"excludeFromSiteSearch": "Include",
"articleAge": "0",
"path": "/news/12083224/former-openai-exec-calls-decision-to-remove-sam-altman-a-hail-mary-during-musk-trial",
"audioTrackLength": null,
"parsedContent": [
{
"type": "contentString",
"content": "\u003cdiv class=\"post-body\">\u003cp>\u003cp>Microsoft’s CEO and another major player took the stand on Monday in \u003ca href=\"https://www.kqed.org/news/tag/oakland\">Oakland\u003c/a>, testifying in the blockbuster trial between OpenAI co-founders Elon Musk and Sam Altman.\u003c/p>\n\u003cp>Ahead of Altman’s testimony, Musk’s attorney Steven Molo questioned Microsoft CEO Satya Nadella and Ilya Sutskever, a top OpenAI computer scientist who departed the company in 2024. Sutskever discussed his role in orchestrating Altman’s brief ouster in 2023.\u003c/p>\n\u003cp>Over five days in November 2023, Altman was removed and reinstated from his post, after a coalition of board members raised concerns that he had not been “consistently candid in his communications” and cited a breakdown of trust.\u003c/p>\n\u003cp>\u003c/p>\u003c/div>",
"attributes": {
"named": {},
"numeric": []
}
},
{
"type": "component",
"content": "",
"name": "ad",
"attributes": {
"named": {
"label": "fullwidth"
},
"numeric": [
"fullwidth"
]
}
},
{
"type": "contentString",
"content": "\u003cdiv class=\"post-body\">\u003cp>\u003c/p>\n\u003cp>Whether Altman and other executives have maintained OpenAI’s initial stated mission — to develop AI safely and for the “benefit of humanity” — is critical to Musk’s suit, which claims that leaders breached their duty to its nonprofit mission by building a for-profit company on top of it. Musk also alleged that the company unfairly benefited at his expense.\u003c/p>\n\u003cp>Musk also alleges that Microsoft, which is OpenAI’s largest financial backer and until this week held the exclusive rights to license and sell its technology, aided and abetted that breach of trust.\u003c/p>\n\u003cp>Molo questioned Nadella about Microsoft’s motive to invest in OpenAI — a $13 billion input that Nadella said is expected to see a return of about $92 billion, “if it works out.”\u003c/p>\n\u003cfigure id=\"attachment_12081686\" class=\"wp-caption aligncenter\" style=\"max-width: 2000px\">\u003cimg loading=\"lazy\" decoding=\"async\" class=\"wp-image-12081686 size-full\" src=\"https://cdn.kqed.org/wp-content/uploads/sites/10/2026/04/260428-MUSK-ALTMAN-VB-04-KQED.jpg\" alt=\"\" width=\"2000\" height=\"1125\" srcset=\"https://cdn.kqed.org/wp-content/uploads/sites/10/2026/04/260428-MUSK-ALTMAN-VB-04-KQED.jpg 2000w, https://cdn.kqed.org/wp-content/uploads/sites/10/2026/04/260428-MUSK-ALTMAN-VB-04-KQED-160x90.jpg 160w, https://cdn.kqed.org/wp-content/uploads/sites/10/2026/04/260428-MUSK-ALTMAN-VB-04-KQED-1536x864.jpg 1536w, https://cdn.kqed.org/wp-content/uploads/sites/10/2026/04/260428-MUSK-ALTMAN-VB-04-KQED-1200x675.jpg 1200w\" sizes=\"auto, (max-width: 2000px) 100vw, 2000px\">\u003cfigcaption class=\"wp-caption-text\">Steve Molo, Elon Musk’s attorney, presents opening statements in the trial in which Elon Musk (center-right) claims that Sam Altman (right) and OpenAI abandoned their founding promise to develop AI for the benefit of humanity, rather than solely for profit, in Oakland, on April 28, 2026. 
\u003ccite>(Vicki Behringer for KQED)\u003c/cite>\u003c/figcaption>\u003c/figure>\n\u003cp>Musk’s attorney pointed out Nadella’s fiduciary duty to maximize profit, and referenced a series of texts between him and Altman that appeared to show Nadella pushing for an earlier rollout of the paid version of ChatGPT.\u003c/p>\n\u003cp>“When chatGPT paid?” Nadella wrote in the message.\u003c/p>\n\u003cp>Altman said that there was “Not enough compute to make it a good consumer experience,” to which Nadella said, “The sooner the better.”\u003c/p>\n\u003cp>Nadella said that the reason Microsoft invested was that OpenAI was pursuing a for-profit model, but he said, “If the pie became larger, the nonprofit would benefit as well.”\u003c/p>\u003c/div>",
"attributes": {
"named": {},
"numeric": []
}
},
{
"type": "component",
"content": "",
"name": "aside",
"attributes": {
"named": {
"postid": "news_12081916",
"hero": "https://cdn.kqed.org/wp-content/uploads/sites/10/2026/04/AP26118555622828-2000x1333.jpg",
"label": ""
},
"numeric": []
}
},
{
"type": "contentString",
"content": "\u003cdiv class=\"post-body\">\u003cp>Molo asked Nadella if he was aware that, for a period of time, OpenAI’s nonprofit did not have any employees.\u003c/p>\n\u003cp>“I am not,” Nadella said.\u003c/p>\n\u003cp>Molo also questioned Nadella about Microsoft’s role during Altman’s brief ouster. At the time, Nadella announced that he would hire Altman, along with OpenAI’s third co-founder and current president, Greg Brockman, as well as other allies, to head up a new AI team at Microsoft.\u003c/p>\n\u003cp>Nadella said that he “had ideas about how Sam [Altman] and the other employees could join Microsoft if they were not reinstated.”\u003c/p>\n\u003cp>“If people were going to leave OpenAI, I wanted them to come to Microsoft,” he said.\u003c/p>\n\u003cp>Molo asked Nadella if he knew why Altman had been removed, to which Nadella said he was never given an “explicit answer.”\u003c/p>\n\u003cp>“Did the thought occur to you … the board might issue a public statement about why they fired Altman?” Molo said.\u003c/p>\n\u003cp>Nadella said during that period — referred to as “The Blip” by many OpenAI employees — he was focused on ensuring continuity for customers.\u003c/p>\n\u003cp>“It goes back to me wanting to communicate to customers that they can count on us,” he said. 
“Come Monday, that doesn’t just disappear.”\u003c/p>\n\u003cfigure id=\"attachment_12082325\" class=\"wp-caption aligncenter\" style=\"max-width: 2000px\">\u003cimg loading=\"lazy\" decoding=\"async\" class=\"wp-image-12082325 size-full\" src=\"https://cdn.kqed.org/wp-content/uploads/sites/10/2026/05/260504-MUSK-ALTMAN-VB-03-KQED.jpg\" alt=\"\" width=\"2000\" height=\"1125\" srcset=\"https://cdn.kqed.org/wp-content/uploads/sites/10/2026/05/260504-MUSK-ALTMAN-VB-03-KQED.jpg 2000w, https://cdn.kqed.org/wp-content/uploads/sites/10/2026/05/260504-MUSK-ALTMAN-VB-03-KQED-160x90.jpg 160w, https://cdn.kqed.org/wp-content/uploads/sites/10/2026/05/260504-MUSK-ALTMAN-VB-03-KQED-1536x864.jpg 1536w, https://cdn.kqed.org/wp-content/uploads/sites/10/2026/05/260504-MUSK-ALTMAN-VB-03-KQED-1200x675.jpg 1200w\" sizes=\"auto, (max-width: 2000px) 100vw, 2000px\">\u003cfigcaption class=\"wp-caption-text\">OpenAI CEO Sam Altman watches as OpenAI President Greg Brockman testifies in the trial in which Elon Musk claims that Altman and OpenAI abandoned their founding promise to develop AI for the benefit of humanity, rather than solely for profit, in Oakland, on May 4, 2026. \u003ccite>(Vicki Behringer for KQED)\u003c/cite>\u003c/figcaption>\u003c/figure>\n\u003cp>Sutskever, who took the stand after Nadella, described Altman’s removal differently. He said it was a “Hail Mary” to save OpenAI, which had become an environment that was “not conducive” to the technology’s safety.\u003c/p>\n\u003cp>“I felt a great deal of ownership of OpenAI,” he said. “I felt like I created this company. 
I simply cared for it, and I didn’t want it to be destroyed.”\u003c/p>\n\u003cp>Sutskever, who helped lead the ouster, had compiled a more than 50-page record of Altman’s “consistent pattern of lying,” including misrepresenting facts, safety protocols and company information to the board and executives.\u003c/p>\n\u003cp>Sutskever maintained that he had worked on a team that aimed to focus on long-term risks as more powerful AI was built.\u003c/p>\n\u003cp>“The goal of the super alignment is to do the research in advance, such that humanity will have the technological means to make it controlled and safe,” he said.\u003c/p>\n\u003cp>The team was disbanded days after he departed the company, in May 2024.\u003c/p>\n\u003cp>\u003c/p>\n\u003c/div>\u003c/p>",
"attributes": {
"named": {},
"numeric": []
}
}
],
"link": "/news/12083224/former-openai-exec-calls-decision-to-remove-sam-altman-a-hail-mary-during-musk-trial",
"authors": [
"11913",
"251"
],
"categories": [
"news_31795",
"news_6188",
"news_28250",
"news_8",
"news_248"
],
"tags": [
"news_34755",
"news_1386",
"news_32668",
"news_3897",
"news_27626",
"news_19954",
"news_21891",
"news_34054",
"news_33542",
"news_33543",
"news_34586",
"news_1631"
],
"featImg": "news_12083235",
"label": "news"
},
"news_12082428": {
"type": "posts",
"id": "news_12082428",
"meta": {
"index": "posts_1716263798",
"site": "news",
"id": "12082428",
"score": null,
"sort": [
1778061636000
]
},
"guestAuthors": [],
"slug": "inside-sam-altman-and-elon-musks-battle-over-openai",
"title": "Inside Elon Musk and Sam Altman's Battle Over OpenAI",
"publishDate": 1778061636,
"format": "audio",
"headTitle": "Inside Elon Musk and Sam Altman’s Battle Over OpenAI | KQED",
"labelTerm": {},
"content": "\u003cp>\u003cspan style=\"font-weight: 400\">Jurors and journalists are getting a peek into the world of OpenAI and its founding as two of the richest, most powerful men in tech duke it out in an Oakland federal courthouse. \u003c/span>\u003c/p>\n\u003cp>\u003cspan style=\"font-weight: 400\">Elon Musk claims that Sam Altman and other co-founders of OpenAI abandoned their founding promise to develop AI for the benefit of humanity. But does anyone here really have our best interests at heart? KQED’s Rachael Myrow takes us inside.\u003c/span>\u003c/p>\n\u003cp>\u003cstrong>Links:\u003c/strong>\u003c/p>\n\u003cul>\n\u003cli style=\"list-style-type: none\">\n\u003cul>\n\u003cli>\u003ca href=\"https://www.kqed.org/news/12081290/how-to-unscramble-an-omelet-in-silicon-valley-the-musk-v-altman-trial-that-will-try\">How to Unscramble an Omelet in Silicon Valley: The Musk v. Altman Trial That Will Try\u003c/a>\u003c/li>\n\u003c/ul>\n\u003c/li>\n\u003c/ul>\n\u003cp>\u003c!-- iframe plugin v.4.3 wordpress.org/plugins/iframe/ -->\u003cbr>\n\u003ciframe loading=\"lazy\" frameborder=\"0\" height=\"200\" scrolling=\"no\" src=\"https://playlist.megaphone.fm?e=KQINC4004396119\" width=\"100%\" class=\"iframe-class\">\u003c/iframe>\u003c/p>\n\u003ch3>\u003cstrong>Episode Transcript\u003c/strong>\u003c/h3>\n\u003cp>\u003cem>This is a computer-generated transcript. While our team has reviewed it, there may be errors.\u003c/em>\u003c/p>\n\u003cp>[ad fullwidth]\u003c/p>\n\u003cp>\u003cb>Ericka Cruz Guevarra: \u003c/b>\u003cem>[00:00:49] \u003c/em>I’m Ericka Cruz-Gavarra and welcome to The Bay, local news to keep you rooted. Inside a federal courthouse in downtown Oakland, in front of a judge and a jury of their peers, two of the most powerful men in the world are duking it out in court over whether OpenAI, the company behind ChatGPT, was built on a lie. Elon Musk is suing OpenAI and its CEO, Sam Altman. 
For abandoning their founding promise to develop AI for the benefit of humanity. And whether or not you actually believe any of them really had our best interests in mind, one thing is true, that the battle over who runs AI is all about ego and power.\u003c/p>\n\u003cp>\u003cb>Ashley Ortiz: \u003c/b>\u003cem>[00:01:47] \u003c/em>No matter which side wins, the people are going to lose because they are not doing this actually for the benefit of humanity, it’s not about ethics, this is all about power plays within an unfettered, unregulated AI scheme.\u003c/p>\n\u003cp>\u003cb>Ericka Cruz Guevarra: \u003c/b>\u003cem>[00:02:01] \u003c/em>Today, KQED’s Rachael Myrow takes us inside the OpenAI trial.\u003c/p>\n\u003cp>\u003cb>Ericka Cruz Guevarra: \u003c/b>\u003cem>[00:02:19] \u003c/em>It seems like you’re in a pretty dynamic scene right now, Rachael. Can you actually tell us where you are?\u003c/p>\n\u003cp>\u003cb>Rachael Myrow: \u003c/b>\u003cem>[00:02:26] \u003c/em>I’m outside the federal courthouse in Oakland where Musk v. Altman et al. is playing out.\u003c/p>\n\u003cp>\u003cb>Ericka Cruz Guevarra: \u003c/b>\u003cem>[00:02:36] \u003c/em>Rachael Myrow is a senior editor at KQED.\u003c/p>\n\u003cp>\u003cb>Rachael Myrow: \u003c/b>\u003cem>[00:02:41] \u003c/em>This plaza is right on the street so you hear the chirping every time somebody presses a button to cross the street. You hear garbage trucks rolling past. Inside, the courtroom is presided over by Judge Yvonne Gonzalez Rogers and it is packed every single day. Armies of lawyers of course but also journalists from across the country, even a couple from France. And some members of the public. I’d like to call this the hottest theater ticket in Silicon Valley. We got to see Elon Musk spend four days on the witness stand. Sam Altman is sitting just a few feet away in the defense section. 
These two men genuinely cannot stand each other.\u003c/p>\n\u003cp>\u003cb>Ericka Cruz Guevarra: \u003c/b>\u003cem>[00:03:30] \u003c/em>And I understand, Rachael, that there’s not just folks inside of the courtroom for this trial, but also outside protesting as well.\u003c/p>\n\u003cp>\u003cb>Rachael Myrow: \u003c/b>\u003cem>[00:03:41] \u003c/em>Yes, on the very first day, actually when jury selection was taking place, protesters gathered in large numbers outside the courthouse on the plaza with some very pointed and colorful signs.\u003c/p>\n\u003cp>\u003cb>Valerie Sizemore: \u003c/b>\u003cem>[00:03:57] \u003c/em>I used to be a software engineer, but have been unemployed by AI. So now I’m trying to make the resistance happen.\u003c/p>\n\u003cp>\u003cb>Rachael Myrow: \u003c/b>\u003cem>[00:04:06] \u003c/em>I talked to one protester, Valerie Sizemore of Berkeley, who kind of represented, I think, a lot of Bayarians.\u003c/p>\n\u003cp>\u003cb>Valerie Sizemore: \u003c/b>\u003cem>[00:04:15] \u003c/em>I’m not here because I care about the outcome of this trial. I really don’t care. I hope it’s really expensive for someone and like hurts both companies as much as possible.\u003c/p>\n\u003cp>\u003cb>Ericka Cruz Guevarra: \u003c/b>\u003cem>[00:04:27] \u003c/em>Yeah, and it’s, I guess, two-for-one for her to just be outside the courthouse protesting the both of them.\u003c/p>\n\u003cp>\u003cb>Rachael Myrow: \u003c/b>\u003cem>[00:04:34] \u003c/em>Exactly.\u003c/p>\n\u003cp>\u003cb>Ericka Cruz Guevarra: \u003c/b>\u003cem>[00:04:39] \u003c/em>Well, Rachael, I wanna step back a little bit and talk about this trial and just how we even got here. I mean, remind us who is on trial and what exactly these two are fighting over?\u003c/p>\n\u003cp>\u003cb>Rachael Myrow: \u003c/b>\u003cem>[00:04:53] \u003c/em>So it’s a little more than two people. Elon Musk is suing Sam Altman and also Greg Brockman, who is OpenAI’s co-founder and president. 
Musk is suing OpenAI itself and also Microsoft, which invested $13 billion in OpenAI after Musk left.\u003c/p>\n\u003cp>\u003cb>Interviewer: \u003c/b>\u003cem>[00:05:18] \u003c/em>All right, we’re gonna wrap up the day. I’m gonna do a fireside chat with Sam Altman.\u003c/p>\n\u003cp>\u003cb>Rachael Myrow: \u003c/b>\u003cem>[00:05:25] \u003c/em>Let’s dial the clock all the way back to 2015. Musk and Altman found OpenAI as a nonprofit explicitly to develop artificial general intelligence safely and for the benefit of all humanity.\u003c/p>\n\u003cp>\u003cb>Sam Altman: \u003c/b>\u003cem>[00:05:44] \u003c/em>You know, I think AI will probably, like most likely, sort of lead to the end of the world, but in the meantime, there will be great companies created with serious machine learning. I actually just agreed to fund a company that is not even really a company, sort of a semi-company, semi-nonprofit, doing AI safety research.\u003c/p>\n\u003cp>\u003cb>Rachael Myrow: \u003c/b>\u003cem>[00:06:04] \u003c/em>At some point shortly thereafter, it became clear to all parties involved, including Musk, that they needed to establish a for-profit arm as well in order to raise money to pay for things like computing power for this very energy-intensive computer software and also to bring in talent, to bring the best minds of the industry. Musk’s lawsuit is arguing that thereabouts Altman and other co-founders of OpenAI, because there were other people involved, betrayed the mission, that they were actually in it for the profit.\u003c/p>\n\u003cp>\u003cb>Interviewer: \u003c/b>\u003cem>[00:06:45] \u003c/em>Open AI, I mean you seem somewhat frustrated with them. You were one of the big contributors early on?\u003c/p>\n\u003cp>\u003cb>Elon Musk: \u003c/b>\u003cem>[00:06:49] \u003c/em>The reason, I am the reason Open AI exists.\u003c/p>\n\u003cp>\u003cb>Rachael Myrow: \u003c/b>\u003cem>[00:06:54] \u003c/em>So he wants more than his money back. 
He wants Altman and OpenAI’s co-founder and president, Greg Brockman, taken off the board. And he wants $130 billion, disgorged by the for-profit and handed over to the non-profit. The word charity, Ericka, doesn’t appear once in OpenAI’s founding blog post, but Musk keeps referring to OpenAI as a charity. But as OpenAI lawyers like to point out, Musk left OpenAI and then he launched his own AI venture, XAI, which is not a nonprofit and arguably does not operate for the benefit of humanity, for which it has been sued repeatedly.\u003c/p>\n\u003cp>\u003cb>Ericka Cruz Guevarra: \u003c/b>\u003cem>[00:07:50] \u003c/em>So it sounds like Elon Musk is basically saying they stole his charity, and Sam Altman is saying, ‘You chose to walk away.’\u003c/p>\n\u003cp>\u003cb>Rachael Myrow: \u003c/b>\u003cem>[00:08:02] \u003c/em>Yeah. That’s it in a nutshell. There was this funny moment when Musk was on the witness stand. He looked at the jury and he said, quote, it’s not OK to steal a charity. And then he predicted that if Open AI wins this case, the face of charity law in America could be altered forever. At some point, the judge broke in and said, let’s remind the jury, you’re not a lawyer. She’s talking to Musk. And then he replied. I did take Law 101, which got a laugh out of most people in the court.\u003c/p>\n\u003cp>\u003cb>Ericka Cruz Guevarra: \u003c/b>\u003cem>[00:08:36] \u003c/em>Geez.\u003c/p>\n\u003cp>\u003cb>\u003c/b>\u003cem>[00:08:39] \u003c/em>Rachael, what do we make of Sam Altman’s role in this? It sounds like Elon Musk is saying that Sam Altman lied to him.\u003c/p>\n\u003cp>\u003cb>Rachael Myrow: \u003c/b>\u003cem>[00:08:46] \u003c/em>That is a very good question. I need to mention here that we have not seen Sam Altman take the stand in this trial yet. So Altman has not yet had the chance to make his case. 
Just a few weeks ago, we saw a comprehensive profile of Sam Altman in the New Yorker magazine talking to lots and lots of people that Sam Altman is an inveterate liar, the kind of person who will tell you what you want to hear and then go back on it. We haven’t had the opportunity yet to really get into what his character was like during the early days of OpenAI, but pretty much everyone in that courtroom has read that article.\u003c/p>\n\u003cp>\u003cb>Ericka Cruz Guevarra: \u003c/b>\u003cem>[00:09:47] \u003c/em>Coming up, what the OpenAI trial is really about. Stay with us.\u003c/p>\n\u003cp>\u003cb>Ericka Cruz Guevarra: \u003c/b>\u003cem>[00:10:38] \u003c/em>I mean, Rachael, I gotta say, as I’m reading these stories about this case, it really just sounds like a fight between two of some of the richest billionaires in Silicon Valley over this company that they co-founded. But obviously, what’s at the center of it and what is at stake is this very powerful technology that even they seem to acknowledge has the potential to change the world. So what do you think this is really about?\u003c/p>\n\u003cp>\u003cb>Rachael Myrow: \u003c/b>\u003cem>[00:11:13] \u003c/em>Clearly about power, clearly about money, clearly about market dominance. And I do want to say that even though the judge is saying we are not going to talk about the AI apocalypse, it is something that is genuinely on the minds of all of these people in the industry in Silicon Valley and also the rest of us, right? I mean there are people here who take AI safety seriously. Who also think OpenAI has drifted dangerously from its mission. I mean, we’ve seen bad actors using the software who have upended the labor market, terrified all of us from a cybersecurity perspective, made it impossible to get redress as a customer and sometimes as a citizen, enabled a surveillance state here and abroad. 
I mean I could go on, Ericka, because It’s 100% clear to us and the people building this software that there’s a race to the bottom going on from a moral perspective.\u003c/p>\n\u003cp>\u003cb>Ericka Cruz Guevarra: \u003c/b>\u003cem>[00:12:30] \u003c/em>I do want to ask you this question, Rachel, because Elon Musk is saying in this trial that he is the one standing up for the public on AI. Rachel, is there someone working in the public interest when it comes to AI and holding AI companies accountable?\u003c/p>\n\u003cp>\u003cb>Rachael Myrow: \u003c/b>\u003cem>[00:12:53] \u003c/em>Well, you know, don’t we wish? He’d like to present himself as thinking pro-human first, but you know, he also created XAI and has reportedly personally directed his engineers to make XAI a manifestly unsafe product. The judge noted the irony out loud. She said to Musk’s attorneys at one point, It is ironic that your client, despite these risks, is creating a company in the exact same space. And then she added, and I just thought this was so remarkable, coming from, again, a sitting federal judge, quote, I suspect there are people who don’t want to put the future in Mr. Musk’s hands, unquote.\u003c/p>\n\u003cp>\u003cb>Jill Horowitz: \u003c/b>\u003cem>[00:13:44] \u003c/em>And in that sense, I don’t understand why Musk is the one who gets to ask that question. Jill Horowitz, who specializes in non-profit law at Northwestern’s law school, put it this way: when parties have this much money and this much power they can trample over conventional protections of the public interest\u003c/p>\n\u003cp>\u003cb>Jill Horowitz: \u003c/b>\u003cem>[00:14:07] \u003c/em>We’ve got a CEO who is a very powerful player. 
And then we have this outside party who’s purported to be thinking about the best interest of the nonprofit, but he’s a competitor.\u003c/p>\n\u003cp>\u003cb>Rachael Myrow: \u003c/b>\u003cem>[00:14:22] \u003c/em>Congress, you don’t need me to tell you, hasn’t passed any meaningful federal AI regulation. The Trump administration is lobbying alongside the lobbyists for unfettered freedom for the AI industry. And so we end up here, Ericka, in a federal courthouse in Oakland watching two billionaires fight over their recent past. This trial gives us a window into the wheeling and dealing. But it doesn’t give us any power to change the trajectory of AI.\u003c/p>\n\u003cp>\u003cb>Ericka Cruz Guevarra: \u003c/b>\u003cem>[00:14:54] \u003c/em>Rachael what happens if either Elon Musk or Sam Altman wins this trial?\u003c/p>\n\u003cp>\u003cb>\u003c/b>\u003cem>[00:15:01] \u003c/em>So if Musk wins, Judge Gonzalez-Rogers could order OpenAI to revert to a non-profit structure, remove Altman and Brockman, direct some $130 billion in gains back to the non-profit foundation. That would be legally unprecedented and would certainly send shockwaves throughout Silicon Valley. If OpenAI wins, the restructuring stands, the IPO proceeds. And the message to the industry is essentially, you can do this too. You can take a non-profit, make it nominally in charge of a for-profit arm that you build into a trillion-dollar company, and the legal system won’t stop you.\u003c/p>\n\u003cp>\u003cb>Ericka Cruz Guevarra: \u003c/b>\u003cem>[00:15:52] \u003c/em>Last question for you, Rachael. For the protesters outside, what do you think they want to see happen? 
And do they care about who wins?\u003c/p>\n\u003cp>\u003cb>Rachael Myrow: \u003c/b>\u003cem>[00:16:03] \u003c/em>My sense from talking with Ashley Ortiz, who was one of the organizers of the first and biggest protest outside, is that for a lot of the people out here carrying signs and wearing t-shirts that say stop AI, neither Musk nor Altman represents their interests and by extension the public’s interests.\u003c/p>\n\u003cp>\u003cb>Ashley Ortiz: \u003c/b>\u003cem>[00:16:28] \u003c/em>Decision everyone sucks here and y’all both need to take responsibility for your part in this crappy situation.\u003c/p>\n\u003cp>\u003cb>Rachael Myrow: \u003c/b>\u003cem>[00:16:34] \u003c/em>They want accountability for AI, period. I don’t know if they actually think they’re gonna get what they’re asking for, but they wanna make a noise while they can.\u003c/p>\n\u003cp>\u003cb>Ashley Ortiz: \u003c/b>\u003cem>[00:16:46] \u003c/em>We’re letting them both know that both sides, no matter which side wins, the people are going to lose because they are not doing this actually for the benefit of humanity. It’s not about ethics. This is all about power plays within an unfettered, unregulated AI scape.\u003c/p>\n\u003cp>\u003cb>Rachael Myrow: \u003c/b>\u003cem>[00:17:06] \u003c/em>These are the models that are changing our world, and they’re doing it now. 
And regardless of whether OpenAI survives this trial, we’re still gonna have the world that OpenAI helped to create.\u003c/p>\n\u003cp>\u003cb>Ericka Cruz Guevarra: \u003c/b>\u003cem>[00:17:28] \u003c/em>Well, Rachael, thank you so much for chatting with me outside the courtroom and for making the time in your busy morning, I appreciate it.\u003c/p>\n\u003cp>\u003cb>Rachael Myrow: \u003c/b>\u003cem>[00:17:36] \u003c/em>You bet.\u003c/p>\n\u003cp>[ad floatright]\u003c/p>\n\u003cp>\u003ci>\u003cspan style=\"font-weight: 400\">Some members of the KQED podcast team are represented by The Screen Actors Guild, American Federation of Television and Radio Artists, San Francisco-Northern California Local.\u003c/span>\u003c/i>\u003c/p>\n\n",
"blocks": [],
"excerpt": "At issue is whether Sam Altman abandoned his founding promise with Elon Musk to develop AI for the benefit of humanity.",
"status": "publish",
"parent": 0,
"modified": 1778097044,
"stats": {
"hasAudio": true,
"hasVideo": false,
"hasChartOrMap": false,
"iframeSrcs": [],
"hasGoogleForm": false,
"hasGallery": false,
"hasHearkenModule": false,
"hasPolis": false,
"paragraphCount": 54,
"wordCount": 2552
},
"headData": {
"title": "Inside Elon Musk and Sam Altman's Battle Over OpenAI | KQED",
"description": "At issue is whether Sam Altman abandoned his founding promise with Elon Musk to develop AI for the benefit of humanity.",
"ogTitle": "",
"ogDescription": "",
"ogImgId": "",
"twTitle": "",
"twDescription": "",
"twImgId": "",
"schema": {
"@context": "https://schema.org",
"@type": "NewsArticle",
"headline": "Inside Elon Musk and Sam Altman's Battle Over OpenAI",
"datePublished": "2026-05-06T03:00:36-07:00",
"dateModified": "2026-05-06T12:50:44-07:00",
"image": "https://cdn.kqed.org/wp-content/uploads/2020/02/KQED-OG-Image@1x.png",
"isAccessibleForFree": "True",
"publisher": {
"@type": "NewsMediaOrganization",
"@id": "https://www.kqed.org/#organization",
"name": "KQED",
"logo": "https://cdn.kqed.org/wp-content/uploads/2020/02/KQED-OG-Image@1x.png",
"url": "https://www.kqed.org",
"sameAs": [
"https://www.facebook.com/KQED",
"https://twitter.com/KQED",
"https://www.instagram.com/kqed/",
"https://www.tiktok.com/@kqedofficial",
"https://www.linkedin.com/company/kqed",
"https://www.youtube.com/channel/UCeC0IOo7i1P_61zVUWbJ4nw"
]
}
}
},
"primaryCategory": {
"termId": 8,
"slug": "news",
"name": "News"
},
"source": "The Bay",
"sourceUrl": "https://www.kqed.org/podcasts/thebay",
"audioUrl": "https://www.podtrac.com/pts/redirect.mp3/traffic.megaphone.fm/KQINC4004396119.mp3",
"sticky": false,
"nprStoryId": "kqed-12082428",
"templateType": "standard",
"featuredImageType": "standard",
"excludeFromSiteSearch": "Include",
"articleAge": "0",
"path": "/news/12082428/inside-sam-altman-and-elon-musks-battle-over-openai",
"audioTrackLength": null,
"parsedContent": [
{
"type": "contentString",
"content": "\u003cdiv class=\"post-body\">\u003cp>\u003cp>\u003cspan style=\"font-weight: 400\">Jurors and journalists are getting a peek into the world of OpenAI and its founding as two of the richest, most powerful men in tech duke it out in an Oakland federal courthouse. \u003c/span>\u003c/p>\n\u003cp>\u003cspan style=\"font-weight: 400\">Elon Musk claims that Sam Altman and other co-founders of OpenAI abandoned their founding promise to develop AI for the benefit of humanity. But does anyone here really have our best interests at heart? KQED’s Rachael Myrow takes us inside.\u003c/span>\u003c/p>\n\u003cp>\u003cstrong>Links:\u003c/strong>\u003c/p>\n\u003cul>\n\u003cli style=\"list-style-type: none\">\n\u003cul>\n\u003cli>\u003ca href=\"https://www.kqed.org/news/12081290/how-to-unscramble-an-omelet-in-silicon-valley-the-musk-v-altman-trial-that-will-try\">How to Unscramble an Omelet in Silicon Valley: The Musk v. Altman Trial That Will Try\u003c/a>\u003c/li>\n\u003c/ul>\n\u003c/li>\n\u003c/ul>\n\u003cp>\u003c!-- iframe plugin v.4.3 wordpress.org/plugins/iframe/ -->\u003cbr>\n\u003ciframe loading=\"lazy\" frameborder=\"0\" height=\"200\" scrolling=\"no\" src=\"https://playlist.megaphone.fm?e=KQINC4004396119\" width=\"100%\" class=\"iframe-class\">\u003c/iframe>\u003c/p>\n\u003ch3>\u003cstrong>Episode Transcript\u003c/strong>\u003c/h3>\n\u003cp>\u003cem>This is a computer-generated transcript. While our team has reviewed it, there may be errors.\u003c/em>\u003c/p>\n\u003cp>\u003c/p>\u003c/div>",
"attributes": {
"named": {},
"numeric": []
}
},
{
"type": "component",
"content": "",
"name": "ad",
"attributes": {
"named": {
"label": "fullwidth"
},
"numeric": [
"fullwidth"
]
}
},
{
"type": "contentString",
"content": "\u003cdiv class=\"post-body\">\u003cp>\u003c/p>\n\u003cp>\u003cb>Ericka Cruz Guevarra: \u003c/b>\u003cem>[00:00:49] \u003c/em>I’m Ericka Cruz-Gavarra and welcome to The Bay, local news to keep you rooted. Inside a federal courthouse in downtown Oakland, in front of a judge and a jury of their peers, two of the most powerful men in the world are duking it out in court over whether OpenAI, the company behind ChatGPT, was built on a lie. Elon Musk is suing OpenAI and its CEO, Sam Altman. For abandoning their founding promise to develop AI for the benefit of humanity. And whether or not you actually believe any of them really had our best interests in mind, one thing is true, that the battle over who runs AI is all about ego and power.\u003c/p>\n\u003cp>\u003cb>Ashley Ortiz: \u003c/b>\u003cem>[00:01:47] \u003c/em>No matter which side wins, the people are going to lose because they are not doing this actually for the benefit of humanity, it’s not about ethics, this is all about power plays within an unfettered, unregulated AI scheme.\u003c/p>\n\u003cp>\u003cb>Ericka Cruz Guevarra: \u003c/b>\u003cem>[00:02:01] \u003c/em>Today, KQED’s Rachael Myrow takes us inside the OpenAI trial.\u003c/p>\n\u003cp>\u003cb>Ericka Cruz Guevarra: \u003c/b>\u003cem>[00:02:19] \u003c/em>It seems like you’re in a pretty dynamic scene right now, Rachael. Can you actually tell us where you are?\u003c/p>\n\u003cp>\u003cb>Rachael Myrow: \u003c/b>\u003cem>[00:02:26] \u003c/em>I’m outside the federal courthouse in Oakland where Musk v. Altman et al. Is playing out.\u003c/p>\n\u003cp>\u003cb>Ericka Cruz Guevarra: \u003c/b>\u003cem>[00:02:36] \u003c/em>Rachael Myrow is a senior editor at KQED.\u003c/p>\n\u003cp>\u003cb>Rachael Myrow: \u003c/b>\u003cem>[00:02:41] \u003c/em>This plaza is right on the street so you hear the chirping every time somebody presses a button to cross the street. You hear garbage trucks rolling past. 
Inside the courtroom is presided over by Judge Yvonne Gonzalez Rogers and it is packed every single day. Armies of lawyers of course but also journalists from across the country, even a couple from France. And some members of the public. I’d like to call this the hottest theater ticket in Silicon Valley. We got to see Elon Musk spend four days on the witness stand. Sam Altman is sitting just a few feet away in the defense section. These two men genuinely cannot stand each other.\u003c/p>\n\u003cp>\u003cb>Ericka Cruz Guevarra: \u003c/b>\u003cem>[00:03:30] \u003c/em>And I understand, Rachel, that there’s not just folks inside of the courtroom for this trial, but also outside protesting as well.\u003c/p>\n\u003cp>\u003cb>Rachael Myrow: \u003c/b>\u003cem>[00:03:41] \u003c/em>Yes, on the very first day, actually when jury selection was taking place, protesters gathered in large numbers outside the courthouse on the plaza with some very pointed and colorful signs.\u003c/p>\n\u003cp>\u003cb>Valerie Sizemore: \u003c/b>\u003cem>[00:03:57] \u003c/em>I used to be a software engineer, but have been unemployed by AI. So now I’m trying to make the resistance happen.\u003c/p>\n\u003cp>\u003cb>Rachael Myrow: \u003c/b>\u003cem>[00:04:06] \u003c/em>I talked to one protester, Valerie Sizemore of Berkeley, who kind of represented, I think, a lot of Bayarians.\u003c/p>\n\u003cp>\u003cb>Valerie Sizemore: \u003c/b>\u003cem>[00:04:15] \u003c/em>I’m not here because I care about the outcome of this trial. I really don’t care. 
I hope it’s really expensive for someone and like hurts both companies as much as possible.\u003c/p>\n\u003cp>\u003cb>Ericka Cruz Guevarra: \u003c/b>\u003cem>[00:04:27] \u003c/em>Yeah, and it’s, I guess, two-for-one for her to just be outside the courthouse protesting the both of them.\u003c/p>\n\u003cp>\u003cb>Rachael Myrow: \u003c/b>\u003cem>[00:04:34] \u003c/em>Exactly.\u003c/p>\n\u003cp>\u003cb>Ericka Cruz Guevarra: \u003c/b>\u003cem>[00:04:39] \u003c/em>Well, Rachael, I wanna step back a little bit and talk about this trial and just how we even got here. I mean, remind us who is on trial and what exactly these two are fighting over?\u003c/p>\n\u003cp>\u003cb>Rachael Myrow: \u003c/b>\u003cem>[00:04:53] \u003c/em>So it’s a little more than two people. Elon Musk is suing Sam Altman and also Greg Brockman, who is OpenAI’s co-founder and president. Musk is suing OpenAI itself and also Microsoft, which invested $13 billion in OpenAI after Musk left.\u003c/p>\n\u003cp>\u003cb>Interviewer: \u003c/b>\u003cem>[00:05:18] \u003c/em>All right, we’re gonna wrap up the day. I’m gonna do a fireside chat with Sam Altman.\u003c/p>\n\u003cp>\u003cb>Rachael Myrow: \u003c/b>\u003cem>[00:05:25] \u003c/em>Let’s dial the clock all the way back to 2015. Musk and Altman found OpenAI as a nonprofit explicitly to develop artificial general intelligence safely and for the benefit of all humanity.\u003c/p>\n\u003cp>\u003cb>Sam Altman: \u003c/b>\u003cem>[00:05:44] \u003c/em>You know, I think AI will probably, like most likely, sort of lead to the end of the world, but in the meantime, there will be great companies created with serious machine learning. 
I actually just agreed to fund a company that is not even really a company, sort of a semi-company, semi-nonprofit, doing AI safety research.\u003c/p>\n\u003cp>\u003cb>Rachael Myrow: \u003c/b>\u003cem>[00:06:04] \u003c/em>At some point shortly thereafter, it became clear to all parties involved, including Musk, that they needed to establish a for-profit arm as well in order to raise money to pay for things like computing power for this very energy-intensive computer software and also to bring in talent, to bring the best minds of the industry. Musk’s lawsuit is arguing that thereabouts Altman and other co-founders of OpenAI, because there were other people involved, betrayed the mission, that they were actually in it for the profit.\u003c/p>\n\u003cp>\u003cb>Interviewer: \u003c/b>\u003cem>[00:06:45] \u003c/em>Open AI, I mean you seem somewhat frustrated with them. You were one of the big contributors early on?\u003c/p>\n\u003cp>\u003cb>Elon Musk: \u003c/b>\u003cem>[00:06:49] \u003c/em>The reason, I am the reason Open AI exists.\u003c/p>\n\u003cp>\u003cb>Rachael Myrow: \u003c/b>\u003cem>[00:06:54] \u003c/em>So he wants more than his money back. He wants Altman and OpenAI’s co-founder and president, Greg Brockman, taken off the board. And he wants $130 billion, disgorged by the for-profit and handed over to the non-profit. The word charity, Ericka, doesn’t appear once in OpenAI’s founding blog post, but Musk keeps referring to OpenAI as a charity. But as OpenAI lawyers like to point out, Musk left OpenAI and then he launched his own AI venture, XAI, which is not a nonprofit and arguably does not operate for the benefit of humanity, for which it has been sued repeatedly.\u003c/p>\n\u003cp>\u003cb>Ericka Cruz Guevarra: \u003c/b>\u003cem>[00:07:50] \u003c/em>So it sounds like Elon Musk is basically saying they stole his charity, and Sam Altman is saying, ‘You chose to walk away.’\u003c/p>\n\u003cp>\u003cb>Rachael Myrow: \u003c/b>\u003cem>[00:08:02] \u003c/em>Yeah. 
That’s it in a nutshell. There was this funny moment when Musk was on the witness stand. He looked at the jury and he said, quote, it’s not OK to steal a charity. And then he predicted that if Open AI wins this case, the face of charity law in America could be altered forever. At some point, the judge broke in and said, let’s remind the jury, you’re not a lawyer. She’s talking to Musk. And then he replied. I did take Law 101, which got a laugh out of most people in the court.\u003c/p>\n\u003cp>\u003cb>Ericka Cruz Guevarra: \u003c/b>\u003cem>[00:08:36] \u003c/em>Geez.\u003c/p>\n\u003cp>\u003cb>\u003c/b>\u003cem>[00:08:39] \u003c/em>Rachael, what do we make of Sam Altman’s role in this? It sounds like Elon Musk is saying that Sam Altman lied to him.\u003c/p>\n\u003cp>\u003cb>Rachael Myrow: \u003c/b>\u003cem>[00:08:46] \u003c/em>That is a very good question. I need to mention here that we have not seen Sam Altman take the stand in this trial yet. So Altman has not yet had the chance to make his case. Just a few weeks ago, we saw a comprehensive profile of Sam Altman in the New Yorker magazine talking to lots and lots of people that Sam Altman is an inveterate liar, the kind of person who will tell you what you want to hear and then go back on it. We haven’t had the opportunity yet to really get into what his character was like during the early days of OpenAI, but pretty much everyone in that courtroom has read that article.\u003c/p>\n\u003cp>\u003cb>Ericka Cruz Guevarra: \u003c/b>\u003cem>[00:09:47] \u003c/em>Coming up, what the OpenAI trial is really about. Stay with us.\u003c/p>\n\u003cp>\u003cb>Ericka Cruz Guevarra: \u003c/b>\u003cem>[00:10:38] \u003c/em>I mean, Rachael, I gotta say, as I’m reading these stories about this case, it really just sounds like a fight between two of some of the richest billionaires in Silicon Valley over this company that they co-founded. 
But obviously, what’s at the center of it and what is at stake is this very powerful technology that even they seem to acknowledge has the potential to change the world. So what do you think this is really about?\u003c/p>\n\u003cp>\u003cb>Rachael Myrow: \u003c/b>\u003cem>[00:11:13] \u003c/em>Clearly about power, clearly about money, clearly about market dominance. And I do want to say that even though the judge is saying we are not going to talk about the AI apocalypse, it is something that is genuinely on the minds of all of these people in the industry in Silicon Valley and also the rest of us, right? I mean there are people here who take AI safety seriously. Who also think OpenAI has drifted dangerously from its mission. I mean, we’ve seen bad actors using the software who have upended the labor market, terrified all of us from a cybersecurity perspective, made it impossible to get redress as a customer and sometimes as a citizen, enabled a surveillance state here and abroad. I mean I could go on, Ericka, because It’s 100% clear to us and the people building this software that there’s a race to the bottom going on from a moral perspective.\u003c/p>\n\u003cp>\u003cb>Ericka Cruz Guevarra: \u003c/b>\u003cem>[00:12:30] \u003c/em>I do want to ask you this question, Rachel, because Elon Musk is saying in this trial that he is the one standing up for the public on AI. Rachel, is there someone working in the public interest when it comes to AI and holding AI companies accountable?\u003c/p>\n\u003cp>\u003cb>Rachael Myrow: \u003c/b>\u003cem>[00:12:53] \u003c/em>Well, you know, don’t we wish? He’d like to present himself as thinking pro-human first, but you know, he also created XAI and has reportedly personally directed his engineers to make XAI a manifestly unsafe product. The judge noted the irony out loud. She said to Musk’s attorneys at one point, It is ironic that your client, despite these risks, is creating a company in the exact same space. 
And then she added, and I just thought this was so remarkable, coming from, again, a sitting federal judge, quote, I suspect there are people who don’t want to put the future in Mr. Musk’s hands, unquote.\u003c/p>\n\u003cp>\u003cb>Jill Horowitz: \u003c/b>\u003cem>[00:13:44] \u003c/em>And in that sense, I don’t understand why Musk is the one who gets to ask that question. Jill Horowitz, who specializes in non-profit law at Northwestern’s law school, put it this way: when parties have this much money and this much power they can trample over conventional protections of the public interest\u003c/p>\n\u003cp>\u003cb>Jill Horowitz: \u003c/b>\u003cem>[00:14:07] \u003c/em>We’ve got a CEO who is a very powerful player. And then we have this outside party who’s purported to be thinking about the best interest of the nonprofit, but he’s a competitor.\u003c/p>\n\u003cp>\u003cb>Rachael Myrow: \u003c/b>\u003cem>[00:14:22] \u003c/em>Congress, you don’t need me to tell you, hasn’t passed any meaningful federal AI regulation. The Trump administration is lobbying alongside the lobbyists for unfettered freedom for the AI industry. And so we end up here, Ericka, in a federal courthouse in Oakland watching two billionaires fight over their recent past. This trial gives us a window into the wheeling and dealing. But it doesn’t give us any power to change the trajectory of AI.\u003c/p>\n\u003cp>\u003cb>Ericka Cruz Guevarra: \u003c/b>\u003cem>[00:14:54] \u003c/em>Rachael what happens if either Elon Musk or Sam Altman wins this trial?\u003c/p>\n\u003cp>\u003cb>\u003c/b>\u003cem>[00:15:01] \u003c/em>So if Musk wins, Judge Gonzalez-Rogers could order OpenAI to revert to a non-profit structure, remove Altman and Brockman, direct some $130 billion in gains back to the non-profit foundation. That would be legally unprecedented and would certainly send shockwaves throughout Silicon Valley. If OpenAI wins, the restructuring stands, the IPO proceeds. 
And the message to the industry is essentially, you can do this too. You can take a non-profit, make it nominally in charge of a for-profit arm that you build into a trillion-dollar company, and the legal system won’t stop you.\u003c/p>\n\u003cp>\u003cb>Ericka Cruz Guevarra: \u003c/b>\u003cem>[00:15:52] \u003c/em>Last question for you, Rachael. For the protesters outside, what do you think they want to see happen? And do they care about who wins?\u003c/p>\n\u003cp>\u003cb>Rachael Myrow: \u003c/b>\u003cem>[00:16:03] \u003c/em>My sense from talking with Ashley Ortiz, who was one of the organizers of the first and biggest protest outside, is that for a lot of the people out here carrying signs and wearing t-shirts that say stop AI, neither Musk nor Altman represents their interests and by extension the public’s interests.\u003c/p>\n\u003cp>\u003cb>Ashley Ortiz: \u003c/b>\u003cem>[00:16:28] \u003c/em>Decision everyone sucks here and y’all both need to take responsibility for your part in this crappy situation.\u003c/p>\n\u003cp>\u003cb>Rachael Myrow: \u003c/b>\u003cem>[00:16:34] \u003c/em>They want accountability for AI, period. I don’t know if they actually think they’re gonna get what they’re asking for, but they wanna make a noise while they can.\u003c/p>\n\u003cp>\u003cb>Ashley Ortiz: \u003c/b>\u003cem>[00:16:46] \u003c/em>We’re letting them both know that both sides, no matter which side wins, the people are going to lose because they are not doing this actually for the benefit of humanity. It’s not about ethics. This is all about power plays within an unfettered, unregulated AI scape.\u003c/p>\n\u003cp>\u003cb>Rachael Myrow: \u003c/b>\u003cem>[00:17:06] \u003c/em>These are the models that are changing our world, and they’re doing it now. 
And regardless of whether OpenAI survives this trial, we’re still gonna have the world that OpenAI helped to create.\u003c/p>\n\u003cp>\u003cb>Ericka Cruz Guevarra: \u003c/b>\u003cem>[00:17:28] \u003c/em>Well, Rachael, thank you so much for chatting with me outside the courtroom and for making the time in your busy morning, I appreciate it.\u003c/p>\n\u003cp>\u003cb>Rachael Myrow: \u003c/b>\u003cem>[00:17:36] \u003c/em>You bet.\u003c/p>\n\u003cp>\u003c/p>\u003c/div>",
"attributes": {
"named": {},
"numeric": []
}
},
{
"type": "component",
"content": "",
"name": "ad",
"attributes": {
"named": {
"label": "floatright"
},
"numeric": [
"floatright"
]
}
},
{
"type": "contentString",
"content": "\u003cdiv class=\"post-body\">\u003cp>\u003c/p>\n\u003cp>\u003ci>\u003cspan style=\"font-weight: 400\">Some members of the KQED podcast team are represented by The Screen Actors Guild, American Federation of Television and Radio Artists, San Francisco-Northern California Local.\u003c/span>\u003c/i>\u003c/p>\n\n\u003c/div>\u003c/p>",
"attributes": {
"named": {},
"numeric": []
}
}
],
"link": "/news/12082428/inside-sam-altman-and-elon-musks-battle-over-openai",
"authors": [
"8654",
"251",
"11649",
"11831"
],
"categories": [
"news_8",
"news_248"
],
"tags": [
"news_34755",
"news_32668",
"news_3897",
"news_36810",
"news_33812",
"news_35758",
"news_22598"
],
"featImg": "news_12082344",
"label": "source_news_12082428"
},
"news_12082064": {
"type": "posts",
"id": "news_12082064",
"meta": {
"index": "posts_1716263798",
"site": "news",
"id": "12082064",
"score": null,
"sort": [
1777676594000
]
},
"guestAuthors": [],
"slug": "openai-back-in-court-over-canada-school-shooters-use-of-chatgpt",
"title": "OpenAI Back in Court Over Canada School Shooter’s Use of ChatGPT",
"publishDate": 1777676594,
"format": "standard",
"headTitle": "OpenAI Back in Court Over Canada School Shooter’s Use of ChatGPT | KQED",
"labelTerm": {
"site": "news"
},
"content": "\u003cp>The families of victims of a school shooting in a British Columbia town sued artificial intelligence company \u003ca href=\"https://www.kqed.org/news/tag/open-ai\">OpenAI \u003c/a>in a San Francisco court this week, alleging that the company behind \u003ca href=\"https://www.kqed.org/news/tag/chatgpt\">ChatGPT\u003c/a> failed to alert police of the shooter’s alarming interactions with the chatbot.\u003c/p>\n\u003cp>One of the lawsuits was filed on behalf of Shannda Aviugana-Durand, an education assistant who was shot and killed in a library at \u003ca href=\"https://docs.google.com/document/d/1BU49CY30r0KCfBs0NJuk5S0KJ2E5VEuIF2IpxdwviIo/edit?tab=t.0\">Tumbler Ridge Secondary School\u003c/a>. The suit alleges negligence, aiding and abetting a mass shooting, wrongful death and liability, among other claims. According to the lawsuit, Aviugana-Durand’s daughter was present at the time of the attack.\u003c/p>\n\u003cp>The educational assistant was one of six people who were killed by an 18-year-old in February. The teen — who later shot herself — also killed her mother and her 11-year-old half-brother at home beforehand. Twenty-five people were also injured in the attack, Canada’s deadliest mass shooting in years.\u003c/p>\n\u003cp>[ad fullwidth]\u003c/p>\n\u003cp>Another lawsuit was filed Wednesday on behalf of 12-year-old Maya Gebala, who was critically injured in the February shooting. The plaintiffs’ attorney, Jay Edelson, said in an interview with the \u003cem>Associated Press\u003c/em> that decisions made by OpenAI and its CEO Sam Altman “have destroyed the town. 
The people are really resilient, but what happened is unimaginable.”\u003c/p>\n\u003cp>Altman sent a letter last week \u003ca href=\"https://apnews.com/article/openai-altman-tumbler-ridge-killings-apology-dec2adaad3946583519370eede6a99e2\">formally apologizing\u003c/a> to the community that his company did not notify law enforcement about the shooter’s online behavior in the weeks leading up to the attack.\u003c/p>\n\u003cp>The case highlights concerns about the harms posed by \u003ca href=\"https://apnews.com/article/ai-sycophancy-chatbots-science-study-8dc61e69278b661cab1e53d38b4173b6\">overly agreeable AI chatbots\u003c/a> and what obligations the tech industry has to control them or notify authorities about planned violence by chatbot users. This month, \u003ca href=\"https://apnews.com/article/missing-grad-students-florida-6279adeef3d0540865de39ab3d6f8093\">prosecutors investigating the deaths\u003c/a> of two University of South Florida doctoral students said that the suspect asked ChatGPT about body disposal in the lead-up to the students’ disappearance.\u003c/p>\n\u003cfigure id=\"attachment_12079761\" class=\"wp-caption aligncenter\" style=\"max-width: 2000px\">\u003cimg loading=\"lazy\" decoding=\"async\" class=\"wp-image-12079761 size-full\" src=\"https://cdn.kqed.org/wp-content/uploads/sites/10/2026/04/SamAltmanGetty2.jpg\" alt=\"\" width=\"2000\" height=\"1333\" srcset=\"https://cdn.kqed.org/wp-content/uploads/sites/10/2026/04/SamAltmanGetty2.jpg 2000w, https://cdn.kqed.org/wp-content/uploads/sites/10/2026/04/SamAltmanGetty2-160x107.jpg 160w, https://cdn.kqed.org/wp-content/uploads/sites/10/2026/04/SamAltmanGetty2-1536x1024.jpg 1536w\" sizes=\"auto, (max-width: 2000px) 100vw, 2000px\">\u003cfigcaption class=\"wp-caption-text\">OpenAI CEO Sam Altman speaks during the BlackRock Infrastructure Summit on March 11, 2026, in Washington, D.C. 
\u003ccite>(Anna Moneymaker/Getty Images)\u003c/cite>\u003c/figcaption>\u003c/figure>\n\u003cp>“It’s not the first lawsuit of its kind,” said Robin Feldman, law professor at UC Law San Francisco and director of its AI Law and Innovation Institute. “This is part of an early wave of lawsuits in which citizens are asking to hold LLMs responsible for harms that happen down the line, whether they are crimes, mental health problems, suicide.”\u003c/p>\n\u003cp>“ChatGPT was first on the scene. And it is the most widely known of the LLMs,” Feldman said. “That puts it in the hot seat as the law tries to understand how to wrangle this unusual beast.”\u003c/p>\n\u003cp>In response to the lawsuit, OpenAI said in a written statement that the “events in Tumbler Ridge are a tragedy. We have a zero-tolerance policy for using our tools to assist in committing violence.”[aside postID=news_12081916 hero='https://cdn.kqed.org/wp-content/uploads/sites/10/2026/04/AP26118555622828-2000x1333.jpg']“As we shared with Canadian officials, we have already strengthened our safeguards, including improving how ChatGPT responds to signs of distress, connecting people with local support and mental health resources, strengthening how we assess and escalate potential threats of violence, and improving detection of repeat policy violators,” the company said.\u003c/p>\n\u003cp>Edelson, a Chicago-based lawyer known for taking on the tech industry, is already juggling a number of high-profile cases against OpenAI, including from the family of a California teenager who killed himself after \u003ca href=\"https://apnews.com/article/ai-chatbot-teens-congress-chatgpt-character-ce3959b6a3ea1a4997bf1ccabb4f0de2\">conversations with ChatGPT\u003c/a> and another from the heirs of an 83-year-old Connecticut woman \u003ca href=\"https://apnews.com/article/ai-chatgpt-wrongful-death-lawsuit-greenwich-97fd7da31c0fa08f3d3ea9efd6713151\">killed by her son\u003c/a> after ChatGPT allegedly amplified the man’s “paranoid 
delusions.”\u003c/p>\n\u003cp>“This is not a passive technology,” Edelson said, comparing the chatbot interactions with a more conventional online search for information. “What we’ve seen in the past is that (for) people who are mentally ill, the chatbot will validate what they’re saying and then amplify what they’re saying.”\u003c/p>\n\u003cp>Last week, Edelson visited the small town of Tumbler Ridge and met with dozens of people in the basement of a visitor center. He also visited Gebala at a children’s hospital in Vancouver, where she remains hospitalized and seemed alert but unable to speak.\u003c/p>\n\u003cp>“It was so heartbreaking,” he said.\u003c/p>\n\u003cfigure id=\"attachment_12082198\" class=\"wp-caption aligncenter\" style=\"max-width: 2000px\">\u003cimg loading=\"lazy\" decoding=\"async\" class=\"wp-image-12082198 size-full\" src=\"https://cdn.kqed.org/wp-content/uploads/sites/10/2026/05/TumblerRidgeGetty2.jpg\" alt=\"\" width=\"2000\" height=\"1333\" srcset=\"https://cdn.kqed.org/wp-content/uploads/sites/10/2026/05/TumblerRidgeGetty2.jpg 2000w, https://cdn.kqed.org/wp-content/uploads/sites/10/2026/05/TumblerRidgeGetty2-160x107.jpg 160w, https://cdn.kqed.org/wp-content/uploads/sites/10/2026/05/TumblerRidgeGetty2-1536x1024.jpg 1536w\" sizes=\"auto, (max-width: 2000px) 100vw, 2000px\">\u003cfigcaption class=\"wp-caption-text\">Candles, flowers, photographs, plush toys and other items at a makeshift memorial for the victims four days after a deadly mass shooting took place at a school, in the town of Tumbler Ridge, British Columbia, Canada, on Feb. 13, 2026. 
\u003ccite>(Paige Taylor White/AFP via Getty Images)\u003c/cite>\u003c/figcaption>\u003c/figure>\n\u003cp>The lawsuits filed Wednesday also represent the families of the five slain children targeted in the school shooting: Zoey Benoit, Abel Mwansa Jr., Ticaria “Tiki” Lampert and Kylie Smith, all 12, and Ezekiel Schofield, 13.\u003c/p>\n\u003cp>After the shootings, OpenAI came forward to say that last June, the company flagged the shooter’s account as having been used to discuss violence against other people.\u003c/p>\n\u003cp>The company said it considered whether to refer the account to the Royal Canadian Mounted Police, but determined at the time that the account activity didn’t meet a threshold for referral to law enforcement. OpenAI banned the account in June for violating its usage policy.[aside postID=news_12080610 hero='https://cdn.kqed.org/wp-content/uploads/sites/10/2024/06/GettyImages-2155035557-1020x680.jpg']The lawsuits filed Wednesday allege “the victims didn’t learn this because OpenAI was forthcoming, but because \u003ca href=\"https://www.wsj.com/us-news/law/openai-employees-raised-alarms-about-canada-shooting-suspect-months-ago-b585df62\">its own employees leaked it to \u003cem>The Wall Street Journal\u003c/em>\u003c/a> after they could no longer stomach the company’s silence.”\u003c/p>\n\u003cp>In \u003ca href=\"https://tumblerridgelines.com/2026/04/24/openai-apologizes-to-tumbler-ridge/\">his letter\u003c/a>, Altman said he was “deeply sorry that we did not alert law enforcement to the account that was banned in June.”\u003c/p>\n\u003cp>“While I know words can never be enough, I believe an apology is necessary to recognize the harm and irreversible loss your community has suffered,” Altman wrote.\u003c/p>\n\u003cp>British Columbia Premier David Eby, \u003ca href=\"https://x.com/dave_eby/status/2047751590803886291?s=46&t=7BBzFwo6eYLzJIVfAlumEQ\">in a social media post\u003c/a>, called the apology “necessary, and yet grossly insufficient for the 
devastation done to the families of Tumbler Ridge.”\u003c/p>\n\u003cp>The Gebala lawsuit accuses OpenAI of negligence involving a failure to warn law enforcement and “aiding and abetting a mass shooting.”\u003c/p>\n\u003cp>Along with damages, the Gebala lawsuit seeks a court order that would require OpenAI to ban users from ChatGPT if their accounts were deactivated for violent misuse, and to require the company to alert law enforcement when its systems identify someone who poses a “real-world risk of violence.”\u003c/p>\n\u003cp>An earlier case was filed in a court in British Columbia, but a team of lawyers in both countries is seeking to bring the affiliated cases to San Francisco, where OpenAI is headquartered.\u003c/p>\n\u003ch2>‘Untried territory’\u003c/h2>\n\u003cp>Feldman called reports that the company flagged the risk but failed to act effectively “deeply troubling.”\u003c/p>\n\u003cp>“As with so much about AI, the lawsuit will take us into untried territory,” she said. “The old doctrines are being applied to new circumstances.”\u003c/p>\n\u003cp>She said if the families were to win, the company would have to pay damages and assume responsibility for altering its platform to identify and respond to risks.\u003c/p>\n\u003cp>The major issues that the lawsuit will tackle are whether OpenAI and ChatGPT are protected by the First Amendment and whether or not OpenAI had “a duty to act,” she said.\u003c/p>\n\u003cfigure id=\"attachment_12082201\" class=\"wp-caption aligncenter\" style=\"max-width: 2000px\">\u003cimg loading=\"lazy\" decoding=\"async\" class=\"wp-image-12082201 size-full\" src=\"https://cdn.kqed.org/wp-content/uploads/sites/10/2026/05/TumblerRidgeGetty3.jpg\" alt=\"\" width=\"2000\" height=\"1333\" srcset=\"https://cdn.kqed.org/wp-content/uploads/sites/10/2026/05/TumblerRidgeGetty3.jpg 2000w, https://cdn.kqed.org/wp-content/uploads/sites/10/2026/05/TumblerRidgeGetty3-160x107.jpg 160w, 
https://cdn.kqed.org/wp-content/uploads/sites/10/2026/05/TumblerRidgeGetty3-1536x1024.jpg 1536w\" sizes=\"auto, (max-width: 2000px) 100vw, 2000px\">\u003cfigcaption class=\"wp-caption-text\">Community members attend a vigil to honor the victims of one of Canada’s deadliest mass shootings in Tumbler Ridge, British Columbia, Canada, on Feb. 13, 2026. \u003ccite>(Paige Taylor White/AFP via Getty Images)\u003c/cite>\u003c/figcaption>\u003c/figure>\n\u003cp>She said that there are \u003ca href=\"https://www.congress.gov/crs-product/R46751\">parts\u003c/a> of U.S. law that shield tech companies from liability for content that their users host. Essentially, this means platforms are more like “bulletin boards” and “are not responsible for the content.”\u003c/p>\n\u003cp>But this case would raise the question, she said, “Are LLMs like a bulletin board or publisher? Or are they like a facilitator who helped with the crime?”\u003c/p>\n\u003cp>Some companies struggle with the burden of responsibility when reviewing potential threats to public safety, Feldman said, “If they try to help out, they can be viewed as accepting the mantle of responsibility.”\u003c/p>\n\u003cp>According to Feldman, families are also likely to argue that the LLM “is a defective product without appropriate safeguards.\u003c/p>\n\u003cp>“In that case, the question is the following: ‘Is the LLM a defective product, or merely a product that was used improperly? And is it analogous to a product at all?’”\u003c/p>\n\u003cp>“All of these are tough questions as we enter the age of AI, and the courts are just beginning to explore them,” Feldman said.\u003c/p>\n\u003cp>\u003cem>The Associated Press’ Jim Morris contributed to this story.\u003c/em>\u003c/p>\n\u003cp>[ad floatright]\u003c/p>\n",
"blocks": [],
"excerpt": "The lawsuit alleges negligence and wrongful death on account of the shooter’s interactions with the chatbot in the weeks and months leading up to the fatal attack.",
"status": "publish",
"parent": 0,
"modified": 1777678175,
"stats": {
"hasAudio": false,
"hasVideo": false,
"hasChartOrMap": false,
"iframeSrcs": [],
"hasGoogleForm": false,
"hasGallery": false,
"hasHearkenModule": false,
"hasPolis": false,
"paragraphCount": 35,
"wordCount": 1495
},
"headData": {
"title": "OpenAI Back in Court Over Canada School Shooter’s Use of ChatGPT | KQED",
"description": "The lawsuit alleges negligence and wrongful death on account of the shooter’s interactions with the chatbot in the weeks and months leading up to the fatal attack.",
"ogTitle": "",
"ogDescription": "",
"ogImgId": "",
"twTitle": "",
"twDescription": "",
"twImgId": "",
"schema": {
"@context": "https://schema.org",
"@type": "NewsArticle",
"headline": "OpenAI Back in Court Over Canada School Shooter’s Use of ChatGPT",
"datePublished": "2026-05-01T16:03:14-07:00",
"dateModified": "2026-05-01T16:29:35-07:00",
"image": "https://cdn.kqed.org/wp-content/uploads/2020/02/KQED-OG-Image@1x.png",
"isAccessibleForFree": "True",
"publisher": {
"@type": "NewsMediaOrganization",
"@id": "https://www.kqed.org/#organization",
"name": "KQED",
"logo": "https://cdn.kqed.org/wp-content/uploads/2020/02/KQED-OG-Image@1x.png",
"url": "https://www.kqed.org",
"sameAs": [
"https://www.facebook.com/KQED",
"https://twitter.com/KQED",
"https://www.instagram.com/kqed/",
"https://www.tiktok.com/@kqedofficial",
"https://www.linkedin.com/company/kqed",
"https://www.youtube.com/channel/UCeC0IOo7i1P_61zVUWbJ4nw"
]
}
}
},
"primaryCategory": {
"termId": 34167,
"slug": "criminal-justice",
"name": "Criminal Justice"
},
"sticky": false,
"nprByline": "Matt O’Brien, Associated Press, and Nisa Khan, KQED",
"nprStoryId": "kqed-12082064",
"templateType": "standard",
"featuredImageType": "standard",
"excludeFromSiteSearch": "Include",
"showOnAuthorArchivePages": "Yes",
"articleAge": "0",
"path": "/news/12082064/openai-back-in-court-over-canada-school-shooters-use-of-chatgpt",
"audioTrackLength": null,
"parsedContent": [
{
"type": "contentString",
"content": "\u003cdiv class=\"post-body\">\u003cp>\u003cp>The families of victims of a school shooting in a British Columbia town sued artificial intelligence company \u003ca href=\"https://www.kqed.org/news/tag/open-ai\">OpenAI \u003c/a>in a San Francisco court this week, alleging that the company behind \u003ca href=\"https://www.kqed.org/news/tag/chatgpt\">ChatGPT\u003c/a> failed to alert police of the shooter’s alarming interactions with the chatbot.\u003c/p>\n\u003cp>One of the lawsuits was filed on behalf of Shannda Aviugana-Durand, an education assistant who was shot and killed in a library at \u003ca href=\"https://docs.google.com/document/d/1BU49CY30r0KCfBs0NJuk5S0KJ2E5VEuIF2IpxdwviIo/edit?tab=t.0\">Tumbler Ridge Secondary School\u003c/a>. The suit alleges negligence, aiding and abetting a mass shooting, wrongful death and liability, among other claims. According to the lawsuit, Aviugana-Durand’s daughter was present at the time of the attack.\u003c/p>\n\u003cp>The educational assistant was one of six people who were killed by an 18-year-old in February. The teen — who later shot herself — also killed her mother and her 11-year-old half-brother at home beforehand. Twenty-five people were also injured in the attack, Canada’s deadliest mass shooting in years.\u003c/p>\n\u003cp>\u003c/p>\u003c/div>",
"attributes": {
"named": {},
"numeric": []
}
},
{
"type": "component",
"content": "",
"name": "ad",
"attributes": {
"named": {
"label": "fullwidth"
},
"numeric": [
"fullwidth"
]
}
},
{
"type": "contentString",
"content": "\u003cdiv class=\"post-body\">\u003cp>\u003c/p>\n\u003cp>Another lawsuit was filed Wednesday on behalf of 12-year-old Maya Gebala, who was critically injured in the February shooting. The plaintiffs’ attorney, Jay Edelson, said in an interview with the \u003cem>Associated Press\u003c/em> that decisions made by OpenAI and its CEO Sam Altman “have destroyed the town. The people are really resilient, but what happened is unimaginable.”\u003c/p>\n\u003cp>Altman sent a letter last week \u003ca href=\"https://apnews.com/article/openai-altman-tumbler-ridge-killings-apology-dec2adaad3946583519370eede6a99e2\">formally apologizing\u003c/a> to the community that his company did not notify law enforcement about the shooter’s online behavior in the weeks leading up to the attack.\u003c/p>\n\u003cp>The case highlights concerns about the harms posed by \u003ca href=\"https://apnews.com/article/ai-sycophancy-chatbots-science-study-8dc61e69278b661cab1e53d38b4173b6\">overly agreeable AI chatbots\u003c/a> and what obligations the tech industry has to control them or notify authorities about planned violence by chatbot users. 
This month, \u003ca href=\"https://apnews.com/article/missing-grad-students-florida-6279adeef3d0540865de39ab3d6f8093\">prosecutors investigating the deaths\u003c/a> of two University of South Florida doctoral students said that the suspect asked ChatGPT about body disposal in the lead-up to the students’ disappearance.\u003c/p>\n\u003cfigure id=\"attachment_12079761\" class=\"wp-caption aligncenter\" style=\"max-width: 2000px\">\u003cimg loading=\"lazy\" decoding=\"async\" class=\"wp-image-12079761 size-full\" src=\"https://cdn.kqed.org/wp-content/uploads/sites/10/2026/04/SamAltmanGetty2.jpg\" alt=\"\" width=\"2000\" height=\"1333\" srcset=\"https://cdn.kqed.org/wp-content/uploads/sites/10/2026/04/SamAltmanGetty2.jpg 2000w, https://cdn.kqed.org/wp-content/uploads/sites/10/2026/04/SamAltmanGetty2-160x107.jpg 160w, https://cdn.kqed.org/wp-content/uploads/sites/10/2026/04/SamAltmanGetty2-1536x1024.jpg 1536w\" sizes=\"auto, (max-width: 2000px) 100vw, 2000px\">\u003cfigcaption class=\"wp-caption-text\">OpenAI CEO Sam Altman speaks during the BlackRock Infrastructure Summit on March 11, 2026, in Washington, D.C. \u003ccite>(Anna Moneymaker/Getty Images)\u003c/cite>\u003c/figcaption>\u003c/figure>\n\u003cp>“It’s not the first lawsuit of its kind,” said Robin Feldman, law professor at UC Law San Francisco and director of its AI Law and Innovation Institute. “This is part of an early wave of lawsuits in which citizens are asking to hold LLMs responsible for harms that happen down the line, whether they are crimes, mental health problems, suicide.”\u003c/p>\n\u003cp>“ChatGPT was first on the scene. And it is the most widely known of the LLMs,” Feldman said. “That puts it in the hot seat as the law tries to understand how to wrangle this unusual beast.”\u003c/p>\n\u003cp>In response to the lawsuit, OpenAI said in a written statement that the “events in Tumbler Ridge are a tragedy. 
We have a zero-tolerance policy for using our tools to assist in committing violence.”\u003c/p>\u003c/div>",
"attributes": {
"named": {},
"numeric": []
}
},
{
"type": "component",
"content": "",
"name": "aside",
"attributes": {
"named": {
"postid": "news_12081916",
"hero": "https://cdn.kqed.org/wp-content/uploads/sites/10/2026/04/AP26118555622828-2000x1333.jpg",
"label": ""
},
"numeric": []
}
},
{
"type": "contentString",
"content": "\u003cdiv class=\"post-body\">\u003cp>“As we shared with Canadian officials, we have already strengthened our safeguards, including improving how ChatGPT responds to signs of distress, connecting people with local support and mental health resources, strengthening how we assess and escalate potential threats of violence, and improving detection of repeat policy violators,” the company said.\u003c/p>\n\u003cp>Edelson, a Chicago-based lawyer known for taking on the tech industry, is already juggling a number of high-profile cases against OpenAI, including from the family of a California teenager who killed himself after \u003ca href=\"https://apnews.com/article/ai-chatbot-teens-congress-chatgpt-character-ce3959b6a3ea1a4997bf1ccabb4f0de2\">conversations with ChatGPT\u003c/a> and another from the heirs of an 83-year-old Connecticut woman \u003ca href=\"https://apnews.com/article/ai-chatgpt-wrongful-death-lawsuit-greenwich-97fd7da31c0fa08f3d3ea9efd6713151\">killed by her son\u003c/a> after ChatGPT allegedly amplified the man’s “paranoid delusions.”\u003c/p>\n\u003cp>“This is not a passive technology,” Edelson said, comparing the chatbot interactions with a more conventional online search for information. “What we’ve seen in the past is that (for) people who are mentally ill, the chatbot will validate what they’re saying and then amplify what they’re saying.”\u003c/p>\n\u003cp>Last week, Edelson visited the small town of Tumbler Ridge and met with dozens of people in the basement of a visitor center. 
He also visited Gebala at a children’s hospital in Vancouver, where she remains hospitalized and seemed alert but unable to speak.\u003c/p>\n\u003cp>“It was so heartbreaking,” he said.\u003c/p>\n\u003cfigure id=\"attachment_12082198\" class=\"wp-caption aligncenter\" style=\"max-width: 2000px\">\u003cimg loading=\"lazy\" decoding=\"async\" class=\"wp-image-12082198 size-full\" src=\"https://cdn.kqed.org/wp-content/uploads/sites/10/2026/05/TumblerRidgeGetty2.jpg\" alt=\"\" width=\"2000\" height=\"1333\" srcset=\"https://cdn.kqed.org/wp-content/uploads/sites/10/2026/05/TumblerRidgeGetty2.jpg 2000w, https://cdn.kqed.org/wp-content/uploads/sites/10/2026/05/TumblerRidgeGetty2-160x107.jpg 160w, https://cdn.kqed.org/wp-content/uploads/sites/10/2026/05/TumblerRidgeGetty2-1536x1024.jpg 1536w\" sizes=\"auto, (max-width: 2000px) 100vw, 2000px\">\u003cfigcaption class=\"wp-caption-text\">Candles, flowers, photographs, plush toys and other items at a makeshift memorial for the victims four days after a deadly mass shooting took place at a school, in the town of Tumbler Ridge, British Columbia, Canada, on Feb. 13, 2026. \u003ccite>(Paige Taylor White/AFP via Getty Images)\u003c/cite>\u003c/figcaption>\u003c/figure>\n\u003cp>The lawsuits filed Wednesday also represent the families of the five slain children targeted in the school shooting: Zoey Benoit, Abel Mwansa Jr., Ticaria “Tiki” Lampert and Kylie Smith, all 12, and Ezekiel Schofield, 13.\u003c/p>\n\u003cp>After the shootings, OpenAI came forward to say that last June, the company flagged the shooter’s account as having been used to discuss violence against other people.\u003c/p>\n\u003cp>The company said it considered whether to refer the account to the Royal Canadian Mounted Police, but determined at the time that the account activity didn’t meet a threshold for referral to law enforcement. OpenAI banned the account in June for violating its usage policy.\u003c/p>\u003c/div>",
"attributes": {
"named": {},
"numeric": []
}
},
{
"type": "component",
"content": "",
"name": "aside",
"attributes": {
"named": {
"postid": "news_12080610",
"hero": "https://cdn.kqed.org/wp-content/uploads/sites/10/2024/06/GettyImages-2155035557-1020x680.jpg",
"label": ""
},
"numeric": []
}
},
{
"type": "contentString",
"content": "\u003cdiv class=\"post-body\">\u003cp>The lawsuits filed Wednesday allege “the victims didn’t learn this because OpenAI was forthcoming, but because \u003ca href=\"https://www.wsj.com/us-news/law/openai-employees-raised-alarms-about-canada-shooting-suspect-months-ago-b585df62\">its own employees leaked it to \u003cem>The Wall Street Journal\u003c/em>\u003c/a> after they could no longer stomach the company’s silence.”\u003c/p>\n\u003cp>In \u003ca href=\"https://tumblerridgelines.com/2026/04/24/openai-apologizes-to-tumbler-ridge/\">his letter\u003c/a>, Altman said he was “deeply sorry that we did not alert law enforcement to the account that was banned in June.”\u003c/p>\n\u003cp>“While I know words can never be enough, I believe an apology is necessary to recognize the harm and irreversible loss your community has suffered,” Altman wrote.\u003c/p>\n\u003cp>British Columbia Premier David Eby, \u003ca href=\"https://x.com/dave_eby/status/2047751590803886291?s=46&t=7BBzFwo6eYLzJIVfAlumEQ\">in a social media post\u003c/a>, called the apology “necessary, and yet grossly insufficient for the devastation done to the families of Tumbler Ridge.”\u003c/p>\n\u003cp>The Gebala lawsuit accuses OpenAI of negligence involving a failure to warn law enforcement and “aiding and abetting a mass shooting.”\u003c/p>\n\u003cp>Along with damages, the Gebala lawsuit seeks a court order that would require OpenAI to ban users from ChatGPT if their accounts were deactivated for violent misuse, and to require the company to alert law enforcement when its systems identify someone who poses a “real-world risk of violence.”\u003c/p>\n\u003cp>An earlier case was filed in a court in British Columbia, but a team of lawyers in both countries is seeking to bring the affiliated cases to San Francisco, where OpenAI is headquartered.\u003c/p>\n\u003ch2>‘Untried territory’\u003c/h2>\n\u003cp>Feldman called reports that the company flagged the risk but failed to act effectively “deeply 
troubling.”\u003c/p>\n\u003cp>“As with so much about AI, the lawsuit will take us into untried territory,” she said. “The old doctrines are being applied to new circumstances.”\u003c/p>\n\u003cp>She said if the families were to win, the company would have to pay damages and assume responsibility for altering its platform to identify and respond to risks.\u003c/p>\n\u003cp>The major issues that the lawsuit will tackle are whether OpenAI and ChatGPT are protected by the First Amendment and whether or not OpenAI had “a duty to act,” she said.\u003c/p>\n\u003cfigure id=\"attachment_12082201\" class=\"wp-caption aligncenter\" style=\"max-width: 2000px\">\u003cimg loading=\"lazy\" decoding=\"async\" class=\"wp-image-12082201 size-full\" src=\"https://cdn.kqed.org/wp-content/uploads/sites/10/2026/05/TumblerRidgeGetty3.jpg\" alt=\"\" width=\"2000\" height=\"1333\" srcset=\"https://cdn.kqed.org/wp-content/uploads/sites/10/2026/05/TumblerRidgeGetty3.jpg 2000w, https://cdn.kqed.org/wp-content/uploads/sites/10/2026/05/TumblerRidgeGetty3-160x107.jpg 160w, https://cdn.kqed.org/wp-content/uploads/sites/10/2026/05/TumblerRidgeGetty3-1536x1024.jpg 1536w\" sizes=\"auto, (max-width: 2000px) 100vw, 2000px\">\u003cfigcaption class=\"wp-caption-text\">Community members attend a vigil to honor the victims of one of Canada’s deadliest mass shootings in Tumbler Ridge, British Columbia, Canada, on Feb. 13, 2026. \u003ccite>(Paige Taylor White/AFP via Getty Images)\u003c/cite>\u003c/figcaption>\u003c/figure>\n\u003cp>She said that there are \u003ca href=\"https://www.congress.gov/crs-product/R46751\">parts\u003c/a> of U.S. law that shield tech companies from liability for content that their users host. Essentially, this means platforms are more like “bulletin boards” and “are not responsible for the content.”\u003c/p>\n\u003cp>But this case would raise the question, she said, “Are LLMs like a bulletin board or publisher? 
Or are they like a facilitator who helped with the crime?”\u003c/p>\n\u003cp>Some companies struggle with the burden of responsibility when reviewing potential threats to public safety, Feldman said, “If they try to help out, they can be viewed as accepting the mantle of responsibility.”\u003c/p>\n\u003cp>According to Feldman, families are also likely to argue that the LLM “is a defective product without appropriate safeguards.\u003c/p>\n\u003cp>“In that case, the question is the following: ‘Is the LLM a defective product, or merely a product that was used improperly? And is it analogous to a product at all?’”\u003c/p>\n\u003cp>“All of these are tough questions as we enter the age of AI, and the courts are just beginning to explore them,” Feldman said.\u003c/p>\n\u003cp>\u003cem>The Associated Press’ Jim Morris contributed to this story.\u003c/em>\u003c/p>\n\u003cp>\u003c/p>\u003c/div>",
"attributes": {
"named": {},
"numeric": []
}
},
{
"type": "component",
"content": "",
"name": "ad",
"attributes": {
"named": {
"label": "floatright"
},
"numeric": [
"floatright"
]
}
},
{
"type": "contentString",
"content": "\u003cdiv class=\"post-body\">\u003cp>\u003c/p>\n\u003c/div>\u003c/p>",
"attributes": {
"named": {},
"numeric": []
}
}
],
"link": "/news/12082064/openai-back-in-court-over-canada-school-shooters-use-of-chatgpt",
"authors": [
"byline_news_12082064"
],
"categories": [
"news_34167",
"news_28250",
"news_8"
],
"tags": [
"news_34755",
"news_1386",
"news_32668",
"news_17725",
"news_22434",
"news_35784",
"news_33542",
"news_33543",
"news_38"
],
"featImg": "news_12082068",
"label": "news"
},
"news_12081798": {
"type": "posts",
"id": "news_12081798",
"meta": {
"index": "posts_1716263798",
"site": "news",
"id": "12081798",
"score": null,
"sort": [
1777507270000
]
},
"guestAuthors": [],
"slug": "elon-musk-says-sam-altman-tricked-him-into-funding-openai",
"title": "Elon Musk Says Sam Altman Tricked Him Into Funding OpenAI",
"publishDate": 1777507270,
"format": "standard",
"headTitle": "Elon Musk Says Sam Altman Tricked Him Into Funding OpenAI | KQED",
"labelTerm": {
"site": "news"
},
"content": "\u003cp>During the second day of the \u003ca href=\"https://www.kqed.org/news/12081603/elon-musk-takes-aim-at-openai-as-trial-begins-its-not-ok-to-steal-a-charity\">landmark trial between Sam Altman and Elon Musk\u003c/a>, the Tesla founder told the Oakland courthouse that he was a “fool” to fund OpenAI through its early years.\u003c/p>\n\u003cp>Testifying in the lawsuit he brought against Altman, which claims the company’s creators betrayed their mission for profits, Musk suggested Wednesday that Altman and cofounder Greg Brockman wanted to “have your cake and eat it too.”\u003c/p>\n\u003cp>“If you go nonprofit, you’ve got a sort of moral high ground,” he testified.\u003c/p>\n\u003cp>Musk’s testimony tells one version of founding OpenAI: that he, fearing the dangers of artificial intelligence, pursued its development with the goal of benefiting the common good, alongside, he thought, like-minded collaborators. But behind the scenes, those cofounders engaged in a “long con” to profit at his expense.\u003c/p>\n\u003cp>[ad fullwidth]\u003c/p>\n\u003cp>“What they really wanted was a for-profit, where they could make as much money as possible,” Musk said later.\u003c/p>\n\u003cp>Whether the jury believes him will be integral to the decision they’re tasked with making, as they determine whether OpenAI breached charitable trust and engaged in unjust enrichment as it evolved from a nonprofit organization to its current $730 billion iteration.\u003c/p>\n\u003cp>Under cross-examination, Altman’s attorney, William Savitt, questioned Musk’s story and credibility as an altruistic benefactor. 
He pointed to an email Musk sent to Altman in 2015, which said it would be “probably better” if OpenAI operated as a for-profit company with a parallel nonprofit.\u003c/p>\n\u003cfigure id=\"attachment_12081637\" class=\"wp-caption aligncenter\" style=\"max-width: 2000px\">\u003cimg loading=\"lazy\" decoding=\"async\" class=\"size-full wp-image-12081637\" src=\"https://cdn.kqed.org/wp-content/uploads/sites/10/2026/04/260428-MUSK-ALTMAN-VB-01-KQED.jpg\" alt=\"\" width=\"2000\" height=\"1125\" srcset=\"https://cdn.kqed.org/wp-content/uploads/sites/10/2026/04/260428-MUSK-ALTMAN-VB-01-KQED.jpg 2000w, https://cdn.kqed.org/wp-content/uploads/sites/10/2026/04/260428-MUSK-ALTMAN-VB-01-KQED-160x90.jpg 160w, https://cdn.kqed.org/wp-content/uploads/sites/10/2026/04/260428-MUSK-ALTMAN-VB-01-KQED-1536x864.jpg 1536w, https://cdn.kqed.org/wp-content/uploads/sites/10/2026/04/260428-MUSK-ALTMAN-VB-01-KQED-1200x675.jpg 1200w\" sizes=\"auto, (max-width: 2000px) 100vw, 2000px\">\u003cfigcaption class=\"wp-caption-text\">OpenAI’s lead counsel, William Savitt, presents opening statements in the trial in which Elon Musk claims that Sam Altman and OpenAI abandoned their founding promise to develop AI for the benefit of humanity, rather than solely for profit, in Oakland on April 28, 2026. \u003ccite>(Vicki Behringer for KQED)\u003c/cite>\u003c/figcaption>\u003c/figure>\n\u003cp>In another email sent to colleagues at his neurotechnology company, Neuralink, Musk said that Google’s AI development was moving very fast, and that he was concerned OpenAI was not on the path to catch up.\u003c/p>\n\u003cp>“Setting it up as a nonprofit might, in hindsight, have been the wrong move,” Musk wrote. “Sense of urgency is not as high.”\u003c/p>\n\u003cp>Savitt asked if, in 2017, Musk suggested at a party that OpenAI should create a for-profit. 
He said it was just after the company’s AI model had beaten \u003cem>Defense of the Ancients, \u003c/em>a battle video game, which was a pivotal moment in the development process.\u003c/p>\n\u003cp>Musk said he didn’t remember giving instructions to create a for-profit at the time.\u003c/p>\n\u003cp>“This was nine years ago,” he said.[aside postID=news_12081603 hero='https://cdn.kqed.org/wp-content/uploads/sites/10/2026/04/260428-MUSK-ALTMAN-VB-02-KQED.jpg']Savitt said Tuesday that in 2017, OpenAI executives, including Musk, were in the midst of conversations about whether and how to transition the company to a for-profit structure.\u003c/p>\n\u003cp>According to OpenAI’s court filings, as early as summer 2017, Musk had insisted on holding a majority equity stake in any for-profit entity, serving as CEO and controlling its board of directors.\u003c/p>\n\u003cp>Pressed by Savitt about what Musk meant by “expressing what you said about control,” the Tesla founder and billionaire said: “I try to be as literal as possible.”\u003c/p>\n\u003cp>In the fall of 2017, Brockman and Ilya Sutskever, another top OpenAI executive, emailed Musk with concerns about the for-profit structure he proposed. Shortly thereafter, discussions over the structure collapsed, and Musk stopped making significant quarterly funding contributions, OpenAI alleges.\u003c/p>\n\u003cp>He left the company less than six months later.\u003c/p>\n\u003cp>Savitt framed the breakdown and Musk’s exit as a result of his not getting control of the for-profit, and the other executives’ focus on maintaining its philanthropic mission. 
He suggested that Musk tried to pressure them to accept his terms by pausing the majority of his financial backing.\u003c/p>\n\u003cp>“You knew that would create financial pressure for the organization,” Savitt said.\u003c/p>\n\u003cfigure id=\"attachment_12081686\" class=\"wp-caption aligncenter\" style=\"max-width: 2000px\">\u003cimg loading=\"lazy\" decoding=\"async\" class=\"size-full wp-image-12081686\" src=\"https://cdn.kqed.org/wp-content/uploads/sites/10/2026/04/260428-MUSK-ALTMAN-VB-04-KQED.jpg\" alt=\"\" width=\"2000\" height=\"1125\" srcset=\"https://cdn.kqed.org/wp-content/uploads/sites/10/2026/04/260428-MUSK-ALTMAN-VB-04-KQED.jpg 2000w, https://cdn.kqed.org/wp-content/uploads/sites/10/2026/04/260428-MUSK-ALTMAN-VB-04-KQED-160x90.jpg 160w, https://cdn.kqed.org/wp-content/uploads/sites/10/2026/04/260428-MUSK-ALTMAN-VB-04-KQED-1536x864.jpg 1536w, https://cdn.kqed.org/wp-content/uploads/sites/10/2026/04/260428-MUSK-ALTMAN-VB-04-KQED-1200x675.jpg 1200w\" sizes=\"auto, (max-width: 2000px) 100vw, 2000px\">\u003cfigcaption class=\"wp-caption-text\">Steve Molo, Elon Musk’s attorney, presents opening statements in the trial in which Elon Musk (center-right) claims that Sam Altman (right) and OpenAI abandoned their founding promise to develop AI for the benefit of humanity, rather than solely for profit, in Oakland on April 28, 2026. \u003ccite>(Vicki Behringer for KQED)\u003c/cite>\u003c/figcaption>\u003c/figure>\n\u003cp>Musk denied that was his intention. 
Instead, he alleged that Altman convinced Brockman and the others to go against his proposal, and that their concern over his desire for control was disingenuous.\u003c/p>\n\u003cp>“I’m not going to fund something if I don’t have confidence in the people,” he said.\u003c/p>\n\u003cp>When asked whether he proposed that OpenAI be folded into Tesla, Musk said: “There were a lot of ideas that were brainstormed at the time.”\u003c/p>\n\u003cp>In an email, he wrote that doing so would be the “only path that could even hope to hold a candle to Google.”\u003c/p>\n\u003cp>Musk said he left OpenAI in February 2018 because he was focused on Tesla’s survival, and believed that OpenAI intended to continue operating as a nonprofit.\u003c/p>\n\u003cp>Savitt also laid out a series of exchanges between Musk and Altman, in which the OpenAI CEO kept him apprised of the company’s corporate structure. He said in March 2018, Musk responded to an email that noted the creation of a for-profit entity of OpenAI with “OK by me,” and was sent a term sheet for OpenAI LP that summer.[aside postID=news_12081290 hero='https://cdn.kqed.org/wp-content/uploads/sites/10/2026/04/260422-ALTMANMUSK-MD-01-KQED.jpg']Savitt also said Altman emailed Musk a draft of the company’s public announcement of its for-profit arm in March 2019, and texted him asking if he had time to talk about Microsoft’s plan to invest in OpenAI. Musk never responded to that text, according to Savitt.\u003c/p>\n\u003cp>Musk said he was busy with his other companies in 2018, and while he was aware that it had added a for-profit entity, he hadn’t lost complete faith in the company. 
While he’d suspended quarterly $5 million funding contributions prior to his departure, he continued to make some contributions until 2020.\u003c/p>\n\u003cp>He said that he’d gone from enthusiastically supportive to uncertain about OpenAI’s mission, but that he’d fully suspended his contributions when he felt that the company was “deliberately not a nonprofit.”\u003c/p>\n\u003cp>When asked why he waited until 2024 to bring the suit, Musk said that’s when he determined OpenAI breached charitable trust.\u003c/p>\n\u003cp>“Thinking that someone might steal your car is not the same as [if] someone has stolen your car,” Musk said. He said after enlisting his attorney, Alex Spiro, to investigate, he heard from him in 2023 that “the car had been stolen.”\u003c/p>\n\u003cp>“I would have sued sooner if I thought the charity had been stolen sooner,” Musk continued.\u003c/p>\n\u003cp>The trial and Musk’s testimony are expected to continue on Thursday.\u003c/p>\n\u003cp>[ad floatright]\u003c/p>\n",
"blocks": [],
"excerpt": "On the second day of a trial pitting the Tesla founder against OpenAI, Elon Musk said he was a “fool” to support the company behind ChatGPT during its early years.",
"status": "publish",
"parent": 0,
"modified": 1777509912,
"stats": {
"hasAudio": false,
"hasVideo": false,
"hasChartOrMap": false,
"iframeSrcs": [],
"hasGoogleForm": false,
"hasGallery": false,
"hasHearkenModule": false,
"hasPolis": false,
"paragraphCount": 32,
"wordCount": 1208
},
"headData": {
"title": "Elon Musk Says Sam Altman Tricked Him Into Funding OpenAI | KQED",
"description": "On the second day of a trial pitting the Tesla founder against OpenAI, Elon Musk said he was a “fool” to support the company behind ChatGPT during its early years.",
"ogTitle": "",
"ogDescription": "",
"ogImgId": "",
"twTitle": "",
"twDescription": "",
"twImgId": "",
"schema": {
"@context": "https://schema.org",
"@type": "NewsArticle",
"headline": "Elon Musk Says Sam Altman Tricked Him Into Funding OpenAI",
"datePublished": "2026-04-29T17:01:10-07:00",
"dateModified": "2026-04-29T17:45:12-07:00",
"image": "https://cdn.kqed.org/wp-content/uploads/2020/02/KQED-OG-Image@1x.png",
"isAccessibleForFree": "True",
"publisher": {
"@type": "NewsMediaOrganization",
"@id": "https://www.kqed.org/#organization",
"name": "KQED",
"logo": "https://cdn.kqed.org/wp-content/uploads/2020/02/KQED-OG-Image@1x.png",
"url": "https://www.kqed.org",
"sameAs": [
"https://www.facebook.com/KQED",
"https://twitter.com/KQED",
"https://www.instagram.com/kqed/",
"https://www.tiktok.com/@kqedofficial",
"https://www.linkedin.com/company/kqed",
"https://www.youtube.com/channel/UCeC0IOo7i1P_61zVUWbJ4nw"
]
}
}
},
"primaryCategory": {
"termId": 248,
"slug": "technology",
"name": "Technology"
},
"sticky": false,
"nprStoryId": "kqed-12081798",
"templateType": "standard",
"featuredImageType": "standard",
"excludeFromSiteSearch": "Include",
"articleAge": "0",
"path": "/news/12081798/elon-musk-says-sam-altman-tricked-him-into-funding-openai",
"audioTrackLength": null,
"parsedContent": [
{
"type": "contentString",
"content": "\u003cdiv class=\"post-body\">\u003cp>\u003cp>During the second day of the \u003ca href=\"https://www.kqed.org/news/12081603/elon-musk-takes-aim-at-openai-as-trial-begins-its-not-ok-to-steal-a-charity\">landmark trial between Sam Altman and Elon Musk\u003c/a>, the Tesla founder told the Oakland courthouse that he was a “fool” to fund OpenAI through its early years.\u003c/p>\n\u003cp>Testifying in the lawsuit he brought against Altman, which claims the company’s creators betrayed their mission for profits, Musk suggested Wednesday that Altman and cofounder Greg Brockman wanted to “have your cake and eat it too.”\u003c/p>\n\u003cp>“If you go nonprofit, you’ve got a sort of moral high ground,” he testified.\u003c/p>\n\u003cp>Musk’s testimony tells one version of founding OpenAI: that he, fearing the dangers of artificial intelligence, pursued its development with the goal of benefiting the common good, alongside, he thought, like-minded collaborators. But behind the scenes, those cofounders engaged in a “long con” to profit at his expense.\u003c/p>\n\u003cp>\u003c/p>\u003c/div>",
"attributes": {
"named": {},
"numeric": []
}
},
{
"type": "component",
"content": "",
"name": "ad",
"attributes": {
"named": {
"label": "fullwidth"
},
"numeric": [
"fullwidth"
]
}
},
{
"type": "contentString",
"content": "\u003cdiv class=\"post-body\">\u003cp>\u003c/p>\n\u003cp>“What they really wanted was a for-profit, where they could make as much money as possible,” Musk said later.\u003c/p>\n\u003cp>Whether the jury believes him will be integral to the decision they’re tasked with making, as they determine whether OpenAI breached charitable trust and engaged in unjust enrichment as it evolved from a nonprofit organization to its current $730 billion iteration.\u003c/p>\n\u003cp>Under cross-examination, Altman’s attorney, William Savitt, questioned Musk’s story and credibility as an altruistic benefactor. He pointed to an email Musk sent to Altman in 2015, which said it would be “probably better” if OpenAI operated as a for-profit company with a parallel nonprofit.\u003c/p>\n\u003cfigure id=\"attachment_12081637\" class=\"wp-caption aligncenter\" style=\"max-width: 2000px\">\u003cimg loading=\"lazy\" decoding=\"async\" class=\"size-full wp-image-12081637\" src=\"https://cdn.kqed.org/wp-content/uploads/sites/10/2026/04/260428-MUSK-ALTMAN-VB-01-KQED.jpg\" alt=\"\" width=\"2000\" height=\"1125\" srcset=\"https://cdn.kqed.org/wp-content/uploads/sites/10/2026/04/260428-MUSK-ALTMAN-VB-01-KQED.jpg 2000w, https://cdn.kqed.org/wp-content/uploads/sites/10/2026/04/260428-MUSK-ALTMAN-VB-01-KQED-160x90.jpg 160w, https://cdn.kqed.org/wp-content/uploads/sites/10/2026/04/260428-MUSK-ALTMAN-VB-01-KQED-1536x864.jpg 1536w, https://cdn.kqed.org/wp-content/uploads/sites/10/2026/04/260428-MUSK-ALTMAN-VB-01-KQED-1200x675.jpg 1200w\" sizes=\"auto, (max-width: 2000px) 100vw, 2000px\">\u003cfigcaption class=\"wp-caption-text\">OpenAI’s lead counsel, William Savitt, presents opening statements in the trial in which Elon Musk claims that Sam Altman and OpenAI abandoned their founding promise to develop AI for the benefit of humanity, rather than solely for profit, in Oakland on April 28, 2026. 
\u003ccite>(Vicki Behringer for KQED)\u003c/cite>\u003c/figcaption>\u003c/figure>\n\u003cp>In another email sent to colleagues at his neurotechnology company, Neuralink, Musk said that Google’s AI development was moving very fast, and that he was concerned OpenAI was not on the path to catch up.\u003c/p>\n\u003cp>“Setting it up as a nonprofit might, in hindsight, have been the wrong move,” Musk wrote. “Sense of urgency is not as high.”\u003c/p>\n\u003cp>Savitt asked if, in 2017, Musk suggested at a party that OpenAI should create a for-profit. He said it was just after the company’s AI model had beaten \u003cem>Defense of the Ancients, \u003c/em>a battle video game, which was a pivotal moment in the development process.\u003c/p>\n\u003cp>Musk said he didn’t remember giving instructions to create a for-profit at the time.\u003c/p>\n\u003cp>“This was nine years ago,” he said.\u003c/p>\u003c/div>",
"attributes": {
"named": {},
"numeric": []
}
},
{
"type": "component",
"content": "",
"name": "aside",
"attributes": {
"named": {
"postid": "news_12081603",
"hero": "https://cdn.kqed.org/wp-content/uploads/sites/10/2026/04/260428-MUSK-ALTMAN-VB-02-KQED.jpg",
"label": ""
},
"numeric": []
}
},
{
"type": "contentString",
"content": "\u003cdiv class=\"post-body\">\u003cp>Savitt said Tuesday that in 2017, OpenAI executives, including Musk, were in the midst of conversations about whether and how to transition the company to a for-profit structure.\u003c/p>\n\u003cp>According to OpenAI’s court filings, as early as summer 2017, Musk had insisted on holding a majority equity stake in any for-profit entity, serving as CEO and controlling its board of directors.\u003c/p>\n\u003cp>Pressed by Savitt about what Musk meant by “expressing what you said about control,” the Tesla founder and billionaire said: “I try to be as literal as possible.”\u003c/p>\n\u003cp>In the fall of 2017, Brockman and Ilya Sutskever, another top OpenAI executive, emailed Musk with concerns about the for-profit structure he proposed. Shortly thereafter, discussions over the structure collapsed, and Musk stopped making significant quarterly funding contributions, OpenAI alleges.\u003c/p>\n\u003cp>He left the company less than six months later.\u003c/p>\n\u003cp>Savitt framed the breakdown and Musk’s exit as a result of his not getting control of the for-profit, and the other executives’ focus on maintaining its philanthropic mission. 
He suggested that Musk tried to pressure them to accept his terms by pausing the majority of his financial backing.\u003c/p>\n\u003cp>“You knew that would create financial pressure for the organization,” Savitt said.\u003c/p>\n\u003cfigure id=\"attachment_12081686\" class=\"wp-caption aligncenter\" style=\"max-width: 2000px\">\u003cimg loading=\"lazy\" decoding=\"async\" class=\"size-full wp-image-12081686\" src=\"https://cdn.kqed.org/wp-content/uploads/sites/10/2026/04/260428-MUSK-ALTMAN-VB-04-KQED.jpg\" alt=\"\" width=\"2000\" height=\"1125\" srcset=\"https://cdn.kqed.org/wp-content/uploads/sites/10/2026/04/260428-MUSK-ALTMAN-VB-04-KQED.jpg 2000w, https://cdn.kqed.org/wp-content/uploads/sites/10/2026/04/260428-MUSK-ALTMAN-VB-04-KQED-160x90.jpg 160w, https://cdn.kqed.org/wp-content/uploads/sites/10/2026/04/260428-MUSK-ALTMAN-VB-04-KQED-1536x864.jpg 1536w, https://cdn.kqed.org/wp-content/uploads/sites/10/2026/04/260428-MUSK-ALTMAN-VB-04-KQED-1200x675.jpg 1200w\" sizes=\"auto, (max-width: 2000px) 100vw, 2000px\">\u003cfigcaption class=\"wp-caption-text\">Steve Molo, Elon Musk’s attorney, presents opening statements in the trial in which Elon Musk (center-right) claims that Sam Altman (right) and OpenAI abandoned their founding promise to develop AI for the benefit of humanity, rather than solely for profit, in Oakland on April 28, 2026. \u003ccite>(Vicki Behringer for KQED)\u003c/cite>\u003c/figcaption>\u003c/figure>\n\u003cp>Musk denied that was his intention. 
Instead, he alleged that Altman convinced Brockman and the others to go against his proposal, and that their concern over his desire for control was disingenuous.\u003c/p>\n\u003cp>“I’m not going to fund something if I don’t have confidence in the people,” he said.\u003c/p>\n\u003cp>When asked whether he proposed that OpenAI be folded into Tesla, Musk said: “There were a lot of ideas that were brainstormed at the time.”\u003c/p>\n\u003cp>In an email, he wrote that doing so would be the “only path that could even hope to hold a candle to Google.”\u003c/p>\n\u003cp>Musk said he left OpenAI in February 2018 because he was focused on Tesla’s survival, and believed that OpenAI intended to continue operating as a nonprofit.\u003c/p>\n\u003cp>Savitt also laid out a series of exchanges between Musk and Altman, in which the OpenAI CEO kept him apprised of the company’s corporate structure. He said in March 2018, Musk responded to an email that noted the creation of a for-profit entity of OpenAI with “OK by me,” and was sent a term sheet for OpenAI LP that summer.\u003c/p>\u003c/div>",
"attributes": {
"named": {},
"numeric": []
}
},
{
"type": "component",
"content": "",
"name": "aside",
"attributes": {
"named": {
"postid": "news_12081290",
"hero": "https://cdn.kqed.org/wp-content/uploads/sites/10/2026/04/260422-ALTMANMUSK-MD-01-KQED.jpg",
"label": ""
},
"numeric": []
}
},
{
"type": "contentString",
"content": "\u003cdiv class=\"post-body\">\u003cp>Savitt also said Altman emailed Musk a draft of the company’s public announcement of its for-profit arm in March 2019, and texted him asking if he had time to talk about Microsoft’s plan to invest in OpenAI. Musk never responded to that text, according to Savitt.\u003c/p>\n\u003cp>Musk said he was busy with his other companies in 2018, and while he was aware that it had added a for-profit entity, he hadn’t lost complete faith in the company. While he’d suspended quarterly $5 million funding contributions prior to his departure, he continued to make some contributions until 2020.\u003c/p>\n\u003cp>He said that he’d gone from enthusiastically supportive to uncertain about OpenAI’s mission, but that he’d fully suspended his contributions when he felt that the company was “deliberately not a nonprofit.”\u003c/p>\n\u003cp>When asked why he waited until 2024 to bring the suit, Musk said that’s when he determined OpenAI breached charitable trust.\u003c/p>\n\u003cp>“Thinking that someone might steal your car is not the same as [if] someone has stolen your car,” Musk said. He said after enlisting his attorney, Alex Spiro, to investigate, he heard from him in 2023 that “the car had been stolen.”\u003c/p>\n\u003cp>“I would have sued sooner if I thought the charity had been stolen sooner,” Musk continued.\u003c/p>\n\u003cp>The trial and Musk’s testimony are expected to continue on Thursday.\u003c/p>\n\u003cp>\u003c/p>\u003c/div>",
"attributes": {
"named": {},
"numeric": []
}
},
{
"type": "component",
"content": "",
"name": "ad",
"attributes": {
"named": {
"label": "floatright"
},
"numeric": [
"floatright"
]
}
},
{
"type": "contentString",
"content": "\u003cdiv class=\"post-body\">\u003cp>\u003c/p>\n\u003c/div>\u003c/p>",
"attributes": {
"named": {},
"numeric": []
}
}
],
"link": "/news/12081798/elon-musk-says-sam-altman-tricked-him-into-funding-openai",
"authors": [
"11913",
"251"
],
"categories": [
"news_31795",
"news_6188",
"news_8",
"news_248"
],
"tags": [
"news_34755",
"news_32668",
"news_3897",
"news_27626",
"news_19954",
"news_21891",
"news_34054",
"news_33542",
"news_33543",
"news_34586",
"news_1631",
"news_57"
],
"featImg": "news_12081681",
"label": "news"
},
"news_12081603": {
"type": "posts",
"id": "news_12081603",
"meta": {
"index": "posts_1716263798",
"site": "news",
"id": "12081603",
"score": null,
"sort": [
1777421165000
]
},
"guestAuthors": [],
"slug": "elon-musk-takes-aim-at-openai-as-trial-begins-its-not-ok-to-steal-a-charity",
"title": "Elon Musk Takes Aim at OpenAI as Trial Begins: ‘It’s Not OK to Steal a Charity’",
"publishDate": 1777421165,
"format": "standard",
"headTitle": "Elon Musk Takes Aim at OpenAI as Trial Begins: ‘It’s Not OK to Steal a Charity’ | KQED",
"labelTerm": {
"site": "news"
},
"content": "\u003cp>In a federal courtroom in Oakland on Tuesday, attorneys for tech elites Sam Altman and Elon Musk set the stage for a \u003ca href=\"https://www.kqed.org/news/12081290/how-to-unscramble-an-omelet-in-silicon-valley-the-musk-v-altman-trial-that-will-try\">landmark case to determine whether OpenAI\u003c/a>, one of the most powerful artificial intelligence companies in the world, was founded on a lie.\u003c/p>\n\u003cp>At issue is whether the company’s stated mission — to lead AI development to benefit the common good — was authentic or a deceptive pitch designed to attract talent and investment. \u003ca href=\"https://www.kqed.org/forum/2010101912956/its-elon-musks-world-were-just-living-in-it\">Musk\u003c/a> alleges that co-founders Altman and Greg Brockman, who remains Altman’s second-in-command, participated in a “long con” to enrich themselves at his expense, after the three co-founded OpenAI as a nonprofit in 2015.\u003c/p>\n\u003cp>“They’re going to make this lawsuit very complicated, but it’s very simple,” Musk said of OpenAI on the stand on Tuesday afternoon. “It’s not OK to steal a charity.”\u003c/p>\n\u003cp>[ad fullwidth]\u003c/p>\n\u003cp>He departed the company after a falling out and \u003ca href=\"https://www.courtlistener.com/docket/69013420/musk-v-altman/\">sued the company\u003c/a> in 2024, alleging that OpenAI had breached charitable trust by restructuring as a for-profit company, now valued at more than $800 billion.\u003c/p>\n\u003cp>But Altman’s attorneys called the Tesla CEO’s behavior “a tale of two Musks,” shifting from pushing for OpenAI to become a for-profit company under his control, to caring about its nonprofit status only after launching competitor xAI in 2023. They argue OpenAI’s decision to adopt a for-profit structure was integral to its survival.\u003c/p>\n\u003cp>“We’re here because Mr. Musk didn’t get his way,” William Savitt, Altman’s lead attorney, said Tuesday. 
“And because he’s a competitor, he’ll do anything he can to attack OpenAI.”\u003c/p>\n\u003cfigure id=\"attachment_12081686\" class=\"wp-caption aligncenter\" style=\"max-width: 2000px\">\u003cimg loading=\"lazy\" decoding=\"async\" class=\"size-full wp-image-12081686\" src=\"https://cdn.kqed.org/wp-content/uploads/sites/10/2026/04/260428-MUSK-ALTMAN-VB-04-KQED.jpg\" alt=\"\" width=\"2000\" height=\"1125\" srcset=\"https://cdn.kqed.org/wp-content/uploads/sites/10/2026/04/260428-MUSK-ALTMAN-VB-04-KQED.jpg 2000w, https://cdn.kqed.org/wp-content/uploads/sites/10/2026/04/260428-MUSK-ALTMAN-VB-04-KQED-160x90.jpg 160w, https://cdn.kqed.org/wp-content/uploads/sites/10/2026/04/260428-MUSK-ALTMAN-VB-04-KQED-1536x864.jpg 1536w, https://cdn.kqed.org/wp-content/uploads/sites/10/2026/04/260428-MUSK-ALTMAN-VB-04-KQED-1200x675.jpg 1200w\" sizes=\"auto, (max-width: 2000px) 100vw, 2000px\">\u003cfigcaption class=\"wp-caption-text\">Steve Molo, Elon Musk’s attorney, presents opening statements in the trial in which Elon Musk (center-right) claims that Sam Altman (right) and OpenAI abandoned their founding promise to develop AI for the benefit of humanity, rather than solely for profit, in Oakland on April 28, 2026. 
\u003ccite>(Vicki Behringer for KQED)\u003c/cite>\u003c/figcaption>\u003c/figure>\n\u003cp>Steven Molo, Musk’s counsel, told the jury that when Musk, Altman and Brockman set out to found an AI nonprofit, their goals were to develop the technology safely and for the \u003ca href=\"https://www.kqed.org/news/12034916/about-benefiting-humanity-calls-grow-for-openai-to-make-good-on-its-promises\">benefit of humanity\u003c/a>.\u003c/p>\n\u003cp>“It wasn’t a technology to get rich,” he said.\u003c/p>\n\u003cp>After operating as a strict nonprofit for years, OpenAI added a for-profit arm in 2019, which executives said was necessary to obtain the funding needed to develop artificial general intelligence — a more advanced AI technology that surpasses human intelligence, according to court filings.\u003c/p>\n\u003cp>In early conversations about how the for-profit entity would work, Molo said, the structure was likened to a museum gift shop whose revenue funds the institution’s galleries and operations. Brockman and Altman reassured Musk that they were still committed to the nonprofit structure, he said.\u003c/p>\n\u003cp>But behind the scenes, Molo alleges that the other co-founders had more lucrative desires.[aside postID=news_12081290 hero='https://cdn.kqed.org/wp-content/uploads/sites/10/2026/04/260422-ALTMANMUSK-MD-01-KQED.jpg']In court filings, he cited a journal in which Brockman wrote that “it would be nice to be making the billions … we’ve been thinking that maybe we should just flip to a for-profit. making the money for us sounds great and all.”\u003c/p>\n\u003cp>Brockman also wrote that he and another top OpenAI executive, Ilya Sutskever, “cannot say that we are committed to the non-profit. don’t wanna say that we’re committed. 
If three months later we’re doing B-Corp [a certification for for-profit corporations with social and environmental missions], then it was a lie.”\u003c/p>\n\u003cp>Years later, after Musk had departed OpenAI, the company was “no longer operating for the good of humanity,” Molo said.\u003c/p>\n\u003cp>“The museum store sold the Picassos,” he said.\u003c/p>\n\u003cp>Musk’s lawsuit claims OpenAI breached charitable trust and alleges unjust enrichment, which means that one party unfairly benefits at the expense of another. He also accuses Microsoft, which is the company’s largest financial backer and until this week held the exclusive rights to license and sell its technology, of aiding and abetting OpenAI’s breach of charitable trust.\u003c/p>\n\u003cp>OpenAI’s defense, meanwhile, alleges that Musk’s suit is less motivated by a desire to do good than it is by vengeance for his former colleagues, whose company is now eyeing an initial public offering valued at up to $1 trillion.\u003c/p>\n\u003cp>“Musk sat on his claims for years,” Savitt said. “He knew everything that was happening when it was happening. 
My clients had the nerve to go out and succeed without him.”\u003c/p>\n\u003cp>He also pointed out that Musk launched xAI a year before bringing the lawsuit, which would make OpenAI his competitor.\u003c/p>\n\u003cfigure id=\"attachment_12081681\" class=\"wp-caption aligncenter\" style=\"max-width: 2000px\">\u003cimg loading=\"lazy\" decoding=\"async\" class=\"size-full wp-image-12081681\" src=\"https://cdn.kqed.org/wp-content/uploads/sites/10/2026/04/260428-MUSK-ALTMAN-VB-03-KQED-1.jpg\" alt=\"\" width=\"2000\" height=\"1125\" srcset=\"https://cdn.kqed.org/wp-content/uploads/sites/10/2026/04/260428-MUSK-ALTMAN-VB-03-KQED-1.jpg 2000w, https://cdn.kqed.org/wp-content/uploads/sites/10/2026/04/260428-MUSK-ALTMAN-VB-03-KQED-1-160x90.jpg 160w, https://cdn.kqed.org/wp-content/uploads/sites/10/2026/04/260428-MUSK-ALTMAN-VB-03-KQED-1-1536x864.jpg 1536w, https://cdn.kqed.org/wp-content/uploads/sites/10/2026/04/260428-MUSK-ALTMAN-VB-03-KQED-1-1200x675.jpg 1200w\" sizes=\"auto, (max-width: 2000px) 100vw, 2000px\">\u003cfigcaption class=\"wp-caption-text\">Representing Microsoft, Russell Coan (left) speaks as Elon Musk watches in the trial in which Elon Musk claims that Sam Altman and OpenAI abandoned their founding promise to develop AI for the benefit of humanity, rather than solely for profit, in Oakland on April 28, 2026. \u003ccite>(Vicki Behringer for KQED)\u003c/cite>\u003c/figcaption>\u003c/figure>\n\u003cp>Savitt pointed to moments early in OpenAI’s development, when Musk suggested that it would be “probably better” for the company to operate as a “standard C corp[oration] with a parallel nonprofit.” He initially promised to cover the balance of the funding it needed, but reneged when he didn’t get to control the company, Savitt told the jury.\u003c/p>\n\u003cp>Musk was in the middle of the conversations about pivoting from a nonprofit, Savitt said. 
As early as the summer of 2017, he insisted on holding a majority equity stake in any for-profit entity, as well as controlling its board of directors and serving as CEO, according to OpenAI’s court filings.\u003c/p>\n\u003cp>In the fall of that year, after Brockman and Sutskever emailed Musk with concerns about the for-profit structure he proposed, the discussions collapsed, OpenAI alleges. After that, Musk stopped making significant quarterly funding contributions, and he left the company less than six months later.\u003c/p>\n\u003cp>Around that time, Brockman and Altman moved to pursue a for-profit arm — a decision their attorneys say they told Musk about prior to his departure from the board.[aside postID=news_12079896 hero='https://cdn.kqed.org/wp-content/uploads/sites/10/2026/04/Daniel-Moreno-Gama-AP.jpg']Savitt said in court that Musk had given the company less than 4% of the funding he’d promised. While OpenAI had gotten contributions from other donors, he said, those “kept the lights on, but it wasn’t nearly enough to stay on the cutting edge.”\u003c/p>\n\u003cp>“They needed to get the money from somewhere, or else the project collapsed,” he said, alleging that donors weren’t willing to make the billion-dollar contributions that OpenAI needed without an expectation of return.\u003c/p>\n\u003cp>Since OpenAI established its first for-profit subsidiary, which capped investor returns at 100 times their investment, its business has exploded. It’s now a public benefit corporation, required to consider its mission statement but not necessarily to prioritize it.\u003c/p>\n\u003cp>Over the years, its mission statement has been changed several times. 
In 2023, according to the nonprofit parent organization’s \u003ca href=\"https://cdn.theconversation.com/static_files/files/4099/2023-IRS990-OpenAI.pdf?1770819990\">IRS disclosure form\u003c/a>, it sought to build AI that “safely benefits humanity, unconstrained by a need to generate financial return.” But last year, \u003ca href=\"https://app.candid.org/profile/9571629/openai-81-0861541?activeTab=7\">that same form\u003c/a> included a shorter mission statement — one that removed the word “safely” and any mention of finances, Tufts University business professor Alnoor Ebrahim \u003ca href=\"https://theconversation.com/openai-has-deleted-the-word-safely-from-its-mission-and-its-new-structure-is-a-test-for-whether-ai-serves-society-or-shareholders-274467\">wrote in \u003cem>The Conversation\u003c/em>\u003c/a>, an academic news outlet.\u003c/p>\n\u003cp>Former OpenAI employees have left and started a competitor, Anthropic, citing concerns over safety and the company’s direction. In 2023, OpenAI executives and board members, including Sutskever, staged a coup to briefly oust Altman as CEO. They said there’d been a breakdown in trust between him and the board, and that Altman engaged in a pattern of deception and wasn’t “consistently candid in his communications.”\u003c/p>\n\u003cp>Whether Altman’s and OpenAI’s pitch to develop their technology for the benefit of the world is an example of that deception is part of what jurors will aim to root out in the current trial.\u003c/p>\n\u003cp>“I didn’t want to pave the road to hell with good intentions,” Musk said on the stand on Tuesday afternoon. “If you have somebody who’s not trustworthy in charge of AI, I think that’s very dangerous for the whole world.”\u003c/p>\n\u003cp>[ad floatright]\u003c/p>\n",
"blocks": [],
"excerpt": "In a federal courtroom in Oakland, attorneys for tech elites Sam Altman and Elon Musk painted very different pictures of the early years of OpenAI and its mission to benefit the common good.",
"status": "publish",
"parent": 0,
"modified": 1777482966,
"stats": {
"hasAudio": false,
"hasVideo": false,
"hasChartOrMap": false,
"iframeSrcs": [],
"hasGoogleForm": false,
"hasGallery": false,
"hasHearkenModule": false,
"hasPolis": false,
"paragraphCount": 30,
"wordCount": 1473
},
"headData": {
"title": "Elon Musk Takes Aim at OpenAI as Trial Begins: ‘It’s Not OK to Steal a Charity’ | KQED",
"description": "In a federal courtroom in Oakland, attorneys for tech elites Sam Altman and Elon Musk painted very different pictures of the early years of OpenAI and its mission to benefit the common good.",
"ogTitle": "",
"ogDescription": "",
"ogImgId": "",
"twTitle": "",
"twDescription": "",
"twImgId": "",
"schema": {
"@context": "https://schema.org",
"@type": "NewsArticle",
"headline": "Elon Musk Takes Aim at OpenAI as Trial Begins: ‘It’s Not OK to Steal a Charity’",
"datePublished": "2026-04-28T17:06:05-07:00",
"dateModified": "2026-04-29T10:16:06-07:00",
"image": "https://cdn.kqed.org/wp-content/uploads/2020/02/KQED-OG-Image@1x.png",
"isAccessibleForFree": "True",
"publisher": {
"@type": "NewsMediaOrganization",
"@id": "https://www.kqed.org/#organization",
"name": "KQED",
"logo": "https://cdn.kqed.org/wp-content/uploads/2020/02/KQED-OG-Image@1x.png",
"url": "https://www.kqed.org",
"sameAs": [
"https://www.facebook.com/KQED",
"https://twitter.com/KQED",
"https://www.instagram.com/kqed/",
"https://www.tiktok.com/@kqedofficial",
"https://www.linkedin.com/company/kqed",
"https://www.youtube.com/channel/UCeC0IOo7i1P_61zVUWbJ4nw"
]
}
}
},
"primaryCategory": {
"termId": 248,
"slug": "technology",
"name": "Technology"
},
"audioUrl": "https://traffic.omny.fm/d/clips/0af137ef-751e-4b19-a055-aaef00d2d578/87fdd794-f90e-4280-920f-ab89016e8062/3ac84f6e-ca1f-4213-bd14-b43a01848097/audio.mp3",
"sticky": false,
"nprStoryId": "kqed-12081603",
"templateType": "standard",
"featuredImageType": "standard",
"excludeFromSiteSearch": "Include",
"articleAge": "0",
"path": "/news/12081603/elon-musk-takes-aim-at-openai-as-trial-begins-its-not-ok-to-steal-a-charity",
"audioTrackLength": null,
"parsedContent": [
{
"type": "contentString",
"content": "\u003cdiv class=\"post-body\">\u003cp>\u003cp>In a federal courtroom in Oakland on Tuesday, attorneys for tech elites Sam Altman and Elon Musk set the stage for a \u003ca href=\"https://www.kqed.org/news/12081290/how-to-unscramble-an-omelet-in-silicon-valley-the-musk-v-altman-trial-that-will-try\">landmark case to determine whether OpenAI\u003c/a>, one of the most powerful artificial intelligence companies in the world, was founded on a lie.\u003c/p>\n\u003cp>At issue is whether the company’s stated mission — to lead AI development to benefit the common good — was authentic or a deceptive pitch designed to attract talent and investment. \u003ca href=\"https://www.kqed.org/forum/2010101912956/its-elon-musks-world-were-just-living-in-it\">Musk\u003c/a> alleges that co-founders Altman and Greg Brockman, who remains Altman’s second-in-command, participated in a “long con” to enrich themselves at his expense, after the three co-founded OpenAI as a nonprofit in 2015.\u003c/p>\n\u003cp>“They’re going to make this lawsuit very complicated, but it’s very simple,” Musk said of OpenAI on the stand on Tuesday afternoon. “It’s not OK to steal a charity.”\u003c/p>\n\u003cp>\u003c/p>\u003c/div>",
"attributes": {
"named": {},
"numeric": []
}
},
{
"type": "component",
"content": "",
"name": "ad",
"attributes": {
"named": {
"label": "fullwidth"
},
"numeric": [
"fullwidth"
]
}
},
{
"type": "contentString",
"content": "\u003cdiv class=\"post-body\">\u003cp>\u003c/p>\n\u003cp>He departed the company after a falling out and \u003ca href=\"https://www.courtlistener.com/docket/69013420/musk-v-altman/\">sued the company\u003c/a> in 2024, alleging that OpenAI had breached charitable trust by restructuring as a for-profit company, now valued at more than $800 billion.\u003c/p>\n\u003cp>But Altman’s attorneys called the Tesla CEO’s behavior “a tale of two Musks,” shifting from pushing for OpenAI to become a for-profit company under his control, to caring about its nonprofit status only after launching competitor xAI in 2023. They argue OpenAI’s decision to adopt a for-profit structure was integral to its survival.\u003c/p>\n\u003cp>“We’re here because Mr. Musk didn’t get his way,” William Savitt, Altman’s lead attorney, said Tuesday. “And because he’s a competitor, he’ll do anything he can to attack OpenAI.”\u003c/p>\n\u003cfigure id=\"attachment_12081686\" class=\"wp-caption aligncenter\" style=\"max-width: 2000px\">\u003cimg loading=\"lazy\" decoding=\"async\" class=\"size-full wp-image-12081686\" src=\"https://cdn.kqed.org/wp-content/uploads/sites/10/2026/04/260428-MUSK-ALTMAN-VB-04-KQED.jpg\" alt=\"\" width=\"2000\" height=\"1125\" srcset=\"https://cdn.kqed.org/wp-content/uploads/sites/10/2026/04/260428-MUSK-ALTMAN-VB-04-KQED.jpg 2000w, https://cdn.kqed.org/wp-content/uploads/sites/10/2026/04/260428-MUSK-ALTMAN-VB-04-KQED-160x90.jpg 160w, https://cdn.kqed.org/wp-content/uploads/sites/10/2026/04/260428-MUSK-ALTMAN-VB-04-KQED-1536x864.jpg 1536w, https://cdn.kqed.org/wp-content/uploads/sites/10/2026/04/260428-MUSK-ALTMAN-VB-04-KQED-1200x675.jpg 1200w\" sizes=\"auto, (max-width: 2000px) 100vw, 2000px\">\u003cfigcaption class=\"wp-caption-text\">Steve Molo, Elon Musk’s attorney, presents opening statements in the trial in which Elon Musk (center-right) claims that Sam Altman (right) and OpenAI abandoned their founding promise to develop AI for the benefit of humanity, rather 
than solely for profit, in Oakland on April 28, 2026. \u003ccite>(Vicki Behringer for KQED)\u003c/cite>\u003c/figcaption>\u003c/figure>\n\u003cp>Steven Molo, Musk’s counsel, told the jury that when Musk, Altman and Brockman set out to found an AI nonprofit, their goals were to develop the technology safely and for the \u003ca href=\"https://www.kqed.org/news/12034916/about-benefiting-humanity-calls-grow-for-openai-to-make-good-on-its-promises\">benefit of humanity\u003c/a>.\u003c/p>\n\u003cp>“It wasn’t a technology to get rich,” he said.\u003c/p>\n\u003cp>After operating as a strict nonprofit for years, OpenAI added a for-profit arm in 2019, which executives said was necessary to obtain the funding needed to develop artificial general intelligence — a more advanced AI technology that surpasses human intelligence, according to court filings.\u003c/p>\n\u003cp>In early conversations about how the for-profit entity would work, Molo said, the structure was likened to a museum gift shop whose revenue funds the institution’s galleries and operations. Brockman and Altman reassured Musk that they were still committed to the nonprofit structure, he said.\u003c/p>\n\u003cp>But behind the scenes, Molo alleges that the other co-founders had more lucrative desires.\u003c/p>\u003c/div>",
"attributes": {
"named": {},
"numeric": []
}
},
{
"type": "component",
"content": "",
"name": "aside",
"attributes": {
"named": {
"postid": "news_12081290",
"hero": "https://cdn.kqed.org/wp-content/uploads/sites/10/2026/04/260422-ALTMANMUSK-MD-01-KQED.jpg",
"label": ""
},
"numeric": []
}
},
{
"type": "contentString",
"content": "\u003cdiv class=\"post-body\">\u003cp>In court filings, he cited a journal in which Brockman wrote that “it would be nice to be making the billions … we’ve been thinking that maybe we should just flip to a for-profit. making the money for us sounds great and all.”\u003c/p>\n\u003cp>Brockman also wrote that he and another top OpenAI executive, Ilya Sutskever, “cannot say that we are committed to the non-profit. don’t wanna say that we’re committed. If three months later we’re doing B-Corp [a certification for for-profit corporations with social and environmental missions], then it was a lie.”\u003c/p>\n\u003cp>Years later, after Musk had departed OpenAI, the company was “no longer operating for the good of humanity,” Molo said.\u003c/p>\n\u003cp>“The museum store sold the Picassos,” he said.\u003c/p>\n\u003cp>Musk’s lawsuit claims OpenAI breached charitable trust and alleges unjust enrichment, which means that one party unfairly benefits at the expense of another. He also accuses Microsoft, which is the company’s largest financial backer and until this week held the exclusive rights to license and sell its technology, of aiding and abetting OpenAI’s breach of charitable trust.\u003c/p>\n\u003cp>OpenAI’s defense, meanwhile, alleges that Musk’s suit is less motivated by a desire to do good than it is by vengeance for his former colleagues, whose company is now eyeing an initial public offering valued at up to $1 trillion.\u003c/p>\n\u003cp>“Musk sat on his claims for years,” Savitt said. “He knew everything that was happening when it was happening. 
My clients had the nerve to go out and succeed without him.”\u003c/p>\n\u003cp>He also pointed out that Musk launched xAI a year before bringing the lawsuit, which would make OpenAI his competitor.\u003c/p>\n\u003cfigure id=\"attachment_12081681\" class=\"wp-caption aligncenter\" style=\"max-width: 2000px\">\u003cimg loading=\"lazy\" decoding=\"async\" class=\"size-full wp-image-12081681\" src=\"https://cdn.kqed.org/wp-content/uploads/sites/10/2026/04/260428-MUSK-ALTMAN-VB-03-KQED-1.jpg\" alt=\"\" width=\"2000\" height=\"1125\" srcset=\"https://cdn.kqed.org/wp-content/uploads/sites/10/2026/04/260428-MUSK-ALTMAN-VB-03-KQED-1.jpg 2000w, https://cdn.kqed.org/wp-content/uploads/sites/10/2026/04/260428-MUSK-ALTMAN-VB-03-KQED-1-160x90.jpg 160w, https://cdn.kqed.org/wp-content/uploads/sites/10/2026/04/260428-MUSK-ALTMAN-VB-03-KQED-1-1536x864.jpg 1536w, https://cdn.kqed.org/wp-content/uploads/sites/10/2026/04/260428-MUSK-ALTMAN-VB-03-KQED-1-1200x675.jpg 1200w\" sizes=\"auto, (max-width: 2000px) 100vw, 2000px\">\u003cfigcaption class=\"wp-caption-text\">Representing Microsoft, Russell Coan (left) speaks as Elon Musk watches in the trial in which Elon Musk claims that Sam Altman and OpenAI abandoned their founding promise to develop AI for the benefit of humanity, rather than solely for profit, in Oakland on April 28, 2026. \u003ccite>(Vicki Behringer for KQED)\u003c/cite>\u003c/figcaption>\u003c/figure>\n\u003cp>Savitt pointed to moments early in OpenAI’s development, when Musk suggested that it would be “probably better” for the company to operate as a “standard C corp[oration] with a parallel nonprofit.” He initially promised to cover the balance of the funding it needed, but reneged when he didn’t get to control the company, Savitt told the jury.\u003c/p>\n\u003cp>Musk was in the middle of the conversations about pivoting from a nonprofit, Savitt said. 
As early as the summer of 2017, he insisted on holding a majority equity stake in any for-profit entity, as well as controlling its board of directors and serving as CEO, according to OpenAI’s court filings.\u003c/p>\n\u003cp>In the fall of that year, after Brockman and Sutskever emailed Musk with concerns about the for-profit structure he proposed, the discussions collapsed, OpenAI alleges. After that, Musk stopped making significant quarterly funding contributions, and he left the company less than six months later.\u003c/p>\n\u003cp>Around that time, Brockman and Altman moved to pursue a for-profit arm — a decision their attorneys say they told Musk about prior to his departure from the board.\u003c/p>\u003c/div>",
"attributes": {
"named": {},
"numeric": []
}
},
{
"type": "component",
"content": "",
"name": "aside",
"attributes": {
"named": {
"postid": "news_12079896",
"hero": "https://cdn.kqed.org/wp-content/uploads/sites/10/2026/04/Daniel-Moreno-Gama-AP.jpg",
"label": ""
},
"numeric": []
}
},
{
"type": "contentString",
"content": "\u003cdiv class=\"post-body\">\u003cp>Savitt said in court that Musk had given the company less than 4% of the funding he’d promised. While OpenAI had gotten contributions from other donors, he said, those “kept the lights on, but it wasn’t nearly enough to stay on the cutting edge.”\u003c/p>\n\u003cp>“They needed to get the money from somewhere, or else the project collapsed,” he said, alleging that donors weren’t willing to make the billion-dollar contributions that OpenAI needed without an expectation of return.\u003c/p>\n\u003cp>Since OpenAI established its first for-profit subsidiary, which capped investor returns at 100 times their investment, its business has exploded. It’s now a public benefit corporation, required to consider its mission statement but not necessarily to prioritize it.\u003c/p>\n\u003cp>Over the years, its mission statement has been changed several times. In 2023, according to the nonprofit parent organization’s \u003ca href=\"https://cdn.theconversation.com/static_files/files/4099/2023-IRS990-OpenAI.pdf?1770819990\">IRS disclosure form\u003c/a>, it sought to build AI that “safely benefits humanity, unconstrained by a need to generate financial return.” But last year, \u003ca href=\"https://app.candid.org/profile/9571629/openai-81-0861541?activeTab=7\">that same form\u003c/a> included a shorter mission statement — one that removed the word “safely” and any mention of finances, Tufts University business professor Alnoor Ebrahim \u003ca href=\"https://theconversation.com/openai-has-deleted-the-word-safely-from-its-mission-and-its-new-structure-is-a-test-for-whether-ai-serves-society-or-shareholders-274467\">wrote in \u003cem>The Conversation\u003c/em>\u003c/a>, an academic news outlet.\u003c/p>\n\u003cp>Former OpenAI employees have left and started a competitor, Anthropic, citing concerns over safety and the company’s direction. 
In 2023, OpenAI executives and board members, including Sutskever, staged a coup to briefly oust Altman as CEO. They said there’d been a breakdown in trust between him and the board, and that Altman engaged in a pattern of deception and wasn’t “consistently candid in his communications.”\u003c/p>\n\u003cp>Whether Altman’s and OpenAI’s pitch to develop their technology for the benefit of the world is an example of that deception is part of what jurors will aim to root out in the current trial.\u003c/p>\n\u003cp>“I didn’t want to pave the road to hell with good intentions,” Musk said on the stand on Tuesday afternoon. “If you have somebody who’s not trustworthy in charge of AI, I think that’s very dangerous for the whole world.”\u003c/p>\n\u003cp>\u003c/p>\u003c/div>",
"attributes": {
"named": {},
"numeric": []
}
},
{
"type": "component",
"content": "",
"name": "ad",
"attributes": {
"named": {
"label": "floatright"
},
"numeric": [
"floatright"
]
}
},
{
"type": "contentString",
"content": "\u003cdiv class=\"post-body\">\u003cp>\u003c/p>\n\u003c/div>\u003c/p>",
"attributes": {
"named": {},
"numeric": []
}
}
],
"link": "/news/12081603/elon-musk-takes-aim-at-openai-as-trial-begins-its-not-ok-to-steal-a-charity",
"authors": [
"11913",
"251"
],
"categories": [
"news_6188",
"news_28250",
"news_8",
"news_248"
],
"tags": [
"news_34755",
"news_32668",
"news_18352",
"news_3897",
"news_27626",
"news_19954",
"news_34054",
"news_33542",
"news_33543",
"news_34586",
"news_1631"
],
"featImg": "news_12081639",
"label": "news"
},
"news_12081290": {
"type": "posts",
"id": "news_12081290",
"meta": {
"index": "posts_1716263798",
"site": "news",
"id": "12081290",
"score": null,
"sort": [
1777287633000
]
},
"guestAuthors": [],
"slug": "how-to-unscramble-an-omelet-in-silicon-valley-the-musk-v-altman-trial-that-will-try",
"title": "How to Unscramble an Omelet in Silicon Valley: The Musk v. Altman Trial That Will Try",
"publishDate": 1777287633,
"format": "standard",
"headTitle": "How to Unscramble an Omelet in Silicon Valley: The Musk v. Altman Trial That Will Try | KQED",
"labelTerm": {
"site": "news"
},
"content": "\u003cp>Starting Monday in Oakland, a federal judge will consider \u003ca href=\"https://www.kqed.org/forum/2010101912956/its-elon-musks-world-were-just-living-in-it\">Elon Musk\u003c/a>’s claim that Sam Altman and OpenAI abandoned their founding promise to develop AI for the \u003ca href=\"https://www.kqed.org/news/12034916/about-benefiting-humanity-calls-grow-for-openai-to-make-good-on-its-promises\">benefit of humanity\u003c/a>, rather than solely for profit. At stake is not just $134 billion in potential damages, but whether it matters, legally speaking, that one of the most powerful AI companies in the world was built on a lie.\u003c/p>\n\u003cp>Musk and Altman co-founded OpenAI in 2015 as a nonprofit research lab, along with Greg Brockman, an AI researcher and entrepreneur, and others prominent in the field, but Musk left the company after a bitter falling out in 2018.\u003c/p>\n\u003cp>The following year, OpenAI established its first for-profit subsidiary, with investor returns capped at 100 times their investment. This structure would eventually evolve into the nearly trillion-dollar public benefit corporation OpenAI became in 2025. A public benefit corporation is essentially a for-profit company with a mission statement it’s legally required to consider, but not necessarily to prioritize.\u003c/p>\n\u003cp>[ad fullwidth]\u003c/p>\n\u003cp>This\u003ca href=\"https://www.courtlistener.com/docket/69013420/musk-v-altman/\"> lawsuit\u003c/a>, filed in 2024, originally alleged that Altman and Brockman ran a ‘long con,’ conspiring to enrich themselves at Musk’s expense.\u003c/p>\n\u003cp>On the eve of trial, in a move OpenAI called “evasive,” Musk’s lawyers voluntarily dismissed those personal fraud claims. 
What proceeds to trial today are two claims that go beyond Musk’s personal grievance: unjust enrichment and breach of charitable trust — essentially, the argument that OpenAI betrayed, not just Musk, but the public it promised to serve.\u003c/p>\n\u003cp>OpenAI argues Musk was fully aware the research lab needed to evolve beyond its nonprofit structure, because he participated in those early discussions, and even proposed folding OpenAI into Tesla. Now, OpenAI’s lawyers argue, Musk is disingenuously trying to use the courts to kneecap the most prominent rival to his own weaker and more controversial AI venture, xAI.\u003c/p>\n\u003cfigure id=\"attachment_12075430\" class=\"wp-caption aligncenter\" style=\"max-width: 2000px\">\u003cimg loading=\"lazy\" decoding=\"async\" class=\"size-full wp-image-12075430\" src=\"https://cdn.kqed.org/wp-content/uploads/sites/10/2026/03/260304-Elon-Musk-Trial-03-KQED.jpg\" alt=\"\" width=\"2000\" height=\"1333\" srcset=\"https://cdn.kqed.org/wp-content/uploads/sites/10/2026/03/260304-Elon-Musk-Trial-03-KQED.jpg 2000w, https://cdn.kqed.org/wp-content/uploads/sites/10/2026/03/260304-Elon-Musk-Trial-03-KQED-160x107.jpg 160w, https://cdn.kqed.org/wp-content/uploads/sites/10/2026/03/260304-Elon-Musk-Trial-03-KQED-1536x1024.jpg 1536w\" sizes=\"auto, (max-width: 2000px) 100vw, 2000px\">\u003cfigcaption class=\"wp-caption-text\">A courtroom sketch depicts Elon Musk on the stand on March 4, 2026. \u003ccite>(Vicki Behringer for KQED)\u003c/cite>\u003c/figcaption>\u003c/figure>\n\u003cp>“Motivated by jealousy, regret for walking away from OpenAI and a desire to derail a competing AI company, Elon has spent years harassing OpenAI through baseless lawsuits and public attacks,” the company\u003ca href=\"https://openai.com/index/openai-elon-musk/\"> posted\u003c/a> on its website, where it also offers a\u003ca href=\"https://openai.com/index/elon-musk-wanted-an-openai-for-profit/\"> timeline\u003c/a> that Musk v. 
Altman et al case watchers will find helpful as they follow what promises to be a barnburner of a trial.\u003c/p>\n\u003cp>\u003ca href=\"https://www.courtlistener.com/docket/69013420/musk-v-altman/?page=3\">Hundreds of court filings\u003c/a> provide a dishy treasure trove of private communications worthy of a telenovela, including some juicy excerpts from Brockman’s personal journal.\u003c/p>\n\u003cp>He writes about Musk, “it’d be wrong to steal the nonprofit from him. … that’d be pretty morally bankrupt. and he’s really not an idiot.”[aside postID=news_12072425 hero='https://cdn.kqed.org/wp-content/uploads/sites/10/2024/05/AP24134775174210-1020x680.jpg']Also, “Financially, what will take me to $1B?”\u003c/p>\n\u003cp>But without a doubt, it is the beef between Musk and Altman that will dominate this show. “They really do not like each other. That part is not fake,” said Charlie Bullock, a senior research fellow at the nonprofit Institute for Law and AI who advises state and federal policy makers on AI governance topics.\u003c/p>\n\u003cp>This trial promises to put on lurid public display a mini-universe of incestuous business relationships between men famous for rewriting rules rather than following them.\u003c/p>\n\u003cp>Personal spite between Musk and Altman aside, Bullock said, “We’re going to learn a lot over the course of this case and from the conclusion of this case about whether the legal system can meaningfully constrain frontier AI labs.”\u003c/p>\n\u003cp>This trial, Bullock told KQED, is “sort of the fallback option” in the absence of other checks on bad behavior in the AI space, such as federal regulation.\u003c/p>\n\u003cp>There is, for instance, a well-established law in California about nonprofits, for-profits, and how transitions between the two should be regulated. Whether and how it applies in this case is up to U.S. 
District Judge Yvonne Gonzalez Rogers in Oakland to determine over the next month.\u003c/p>\n\u003ch2>OpenAI is like nothing that’s come before\u003c/h2>\n\u003cp>Jill Horwitz, a law professor at Northwestern University and faculty director of the Lowell Milken Center for Philanthropy and Nonprofits at UCLA Law, likens OpenAI’s unique structure to “An enormous tail on a tiny dog.”\u003c/p>\n\u003cp>“The tail is the operating company, which is what everybody thinks of as being OpenAI, and the dog is the nonprofit, and it’s tiny. And it remains to be seen whether that board can be independent enough, because there’s such overlap between the nonprofit board and the for-profit board,” Horwitz said.\u003c/p>\n\u003cfigure id=\"attachment_12054564\" class=\"wp-caption aligncenter\" style=\"max-width: 2000px\">\u003cimg loading=\"lazy\" decoding=\"async\" class=\"wp-image-12054564 size-full\" src=\"https://cdn.kqed.org/wp-content/uploads/sites/10/2025/09/Sam-Altman_chatpgt.jpg\" alt=\"\" width=\"2000\" height=\"1333\" srcset=\"https://cdn.kqed.org/wp-content/uploads/sites/10/2025/09/Sam-Altman_chatpgt.jpg 2000w, https://cdn.kqed.org/wp-content/uploads/sites/10/2025/09/Sam-Altman_chatpgt-160x107.jpg 160w, https://cdn.kqed.org/wp-content/uploads/sites/10/2025/09/Sam-Altman_chatpgt-1536x1024.jpg 1536w\" sizes=\"auto, (max-width: 2000px) 100vw, 2000px\">\u003cfigcaption class=\"wp-caption-text\">Samuel Altman, CEO of OpenAI, testifies before the Senate Judiciary Subcommittee on Privacy, Technology and the Law on May 16, 2023, in Washington, D.C. \u003ccite>(Win McNamee/Getty Images)\u003c/cite>\u003c/figcaption>\u003c/figure>\n\u003cp>“It’s a weird structure. OpenAI isn’t one company. OpenAI is an interconnected group of companies. 
But it all is supposed to be advancing the nonprofit purpose,” Horwitz told KQED.\u003c/p>\n\u003cp>In 2018, even as OpenAI was privately contemplating the for-profit restructuring, it voluntarily adopted a new charter that restated and even strengthened its commitment to the public mission articulated at its founding.\u003c/p>\n\u003cp>In part, this had to do with the pressure Altman and OpenAI felt to attract top AI researchers, many of whom are concerned about the ethics of unleashing world-changing software on the rest of us. In 2024, 13 current and former OpenAI and Google DeepMind employees took the extraordinary step of publishing an \u003ca href=\"https://righttowarn.ai\">open letter\u003c/a> titled “Right to Warn,” calling out their own industry, and asking for protection if they warned the public.[aside postID=news_12079267 hero='https://cdn.kqed.org/wp-content/uploads/sites/10/2026/04/Hegseth-Side-by-Side-c.jpg']“We are hopeful that these risks can be adequately mitigated with sufficient guidance from the scientific community, policymakers, and the public. However, AI companies have strong financial incentives to avoid effective oversight, and we do not believe bespoke structures of corporate governance are sufficient to change this.”\u003c/p>\n\u003cp>To this day, it remains unclear whether Altman’s talk about benefiting humanity was anything more than a savvy sales pitch designed to attract top AI talent and allay the concerns of \u003ca href=\"https://www.kqed.org/news/11976097/california-lawmakers-take-on-ai-regulation-with-a-host-of-bills\">federal regulators\u003c/a>. 
This is one of the key questions trial watchers will be most keen to see answered.\u003c/p>\n\u003cp>“It’s quite typical for scientific research organizations to do all the hard work of the research before their IP is sold to a for-profit company for practical purposes,” said Rose Chan Loui, founding executive director of the Lowell Milken Center for Philanthropy and Nonprofits at UCLA Law.\u003c/p>\n\u003cp>What makes OpenAI unusual, Chan Loui said, is how explicitly and repeatedly the AI developer bound itself to promising its AI would be developed safely and for the benefit of all of humanity. “When they opened up to investment and formed the subsidiary, they recommitted to that purpose. They tied themselves even more tightly.”\u003c/p>\n\u003cp>Anthropic, founded by former OpenAI employees who left over concerns about the company’s direction, has cultivated a reputation as the more safety-conscious, ethically serious player in the AI race, the light gray hat to OpenAI’s dark gray one. Anthropic chose to incorporate as a public benefit corporation from the beginning, rather than a nonprofit, because a public benefit corporation has far more legal flexibility. “Anthropic may be behaving in a way that the public thinks is more charitable, but its legal duties to do so are a lot lower than OpenAI’s,” Horwitz said.\u003c/p>\n\u003ch2>But is Musk the right party to bring this suit?\u003c/h2>\n\u003cp>For legal eagles following this case, it’s curious that Musk is the plaintiff, rather than California’s attorney general, who is the primary legal guardian of charitable assets in the state, where most of OpenAI’s assets are located. 
But in 2025, Attorney General Rob Bonta negotiated a binding \u003ca href=\"https://oag.ca.gov/system/files/attachments/press-docs/Final%20Executed%20MOU%20Between%20OpenAI%20and%20California%20AG%20re%20Notice%20of%20Conditions%20of%20Non-Objection%20%2810.27.2025%29%20%28Signed%20by%20OpenAI%29%20%28Signed%20by%20CA%20DOJ%29.pdf\">memorandum of understanding\u003c/a> with OpenAI. The AG in Delaware, where OpenAI is incorporated, issued a parallel statement of non-objection.\u003c/p>\n\u003cp>A coalition of more than 30 California foundations and nonprofit organizations, including the San Francisco Foundation and TechEquity, \u003ca href=\"https://www.sff.org/Offsite-Media/Charitable-coalition-letter-on-OpenAI-conversion-1-29-25.pdf\">urged Bonta\u003c/a> to take immediate legal action to protect OpenAI’s charitable assets, arguing his office had both the authority and the responsibility to do so.\u003c/p>\n\u003cfigure id=\"attachment_12063671\" class=\"wp-caption aligncenter\" style=\"max-width: 2000px\">\u003cimg loading=\"lazy\" decoding=\"async\" class=\"size-full wp-image-12063671\" src=\"https://cdn.kqed.org/wp-content/uploads/sites/10/2025/11/RobBontaAP.jpg\" alt=\"\" width=\"2000\" height=\"1333\" srcset=\"https://cdn.kqed.org/wp-content/uploads/sites/10/2025/11/RobBontaAP.jpg 2000w, https://cdn.kqed.org/wp-content/uploads/sites/10/2025/11/RobBontaAP-160x107.jpg 160w, https://cdn.kqed.org/wp-content/uploads/sites/10/2025/11/RobBontaAP-1536x1024.jpg 1536w\" sizes=\"auto, (max-width: 2000px) 100vw, 2000px\">\u003cfigcaption class=\"wp-caption-text\">California Attorney General Rob Bonta speaks to reporters as Arizona Attorney General Kris Mayes, left, and Oregon Attorney General Dan Rayfield, right, listen outside the Supreme Court on Wednesday, Nov. 5, 2025, in Washington, D.C. 
\u003ccite>(Mark Schiefelbein/AP Photo)\u003c/cite>\u003c/figcaption>\u003c/figure>\n\u003cp>\u003ca href=\"https://www.kqed.org/news/12034916/about-benefiting-humanity-calls-grow-for-openai-to-make-good-on-its-promises\">More than 50 organizations\u003c/a> also petitioned Bonta to halt OpenAI’s for-profit conversion until he calculated the full market value of OpenAI’s nonprofit assets, estimated at the time at up to $300 billion, and directed OpenAI to transfer that value to independent nonprofit entities.\u003c/p>\n\u003cp>“It’s not too late for the Attorney General to revisit his agreement with OpenAI,” wrote Catherine Bracy, founder and CEO of TechEquity, an Oakland-based tech accountability organization. “The evidence this trial unearths, especially how OpenAI violated its original charitable mission in pursuit of profit, will likely leave him no choice.”\u003c/p>\n\u003cp>Chan Loui is among those scratching her head over a basic question: why does Musk get to bring this case at all? “He’s a competitor,” she said.\u003c/p>\n\u003cp>A personal fraud claim, that Altman lied to him to get his money, might have given Musk the clearest standing as an injured party. But Musk voluntarily dismissed those claims late last week. What remains rests almost entirely on a public interest argument, one that California’s attorney general, not a billionaire with a rival AI company of his own, would typically make. [aside postID=news_12079896 hero='https://cdn.kqed.org/wp-content/uploads/sites/10/2026/04/Daniel-Moreno-Gama-AP.jpg']Chan Loui worries about what it would mean if Judge Gonzalez Rogers effectively threw out that hard-won agreement between the attorneys general and OpenAI, essentially substituting a billionaire rival’s lawsuit for the state’s own regulatory process, whatever its deficiencies.\u003c/p>\n\u003cp>“You don’t want just anyone, any donor to complain,” Chan Loui said. 
“We have all this litigation against charities.” She said she sympathizes with those who want OpenAI to recommit as fully as possible to its original ethos, but she worries about what legal precedents this case could set for everybody else.\u003c/p>\n\u003cp>What’s not in dispute is that this trial will be a riveting spectacle for Silicon Valley, which will be watching this case with a mix of curiosity and fear. Judge Gonzalez Rogers has already proven \u003ca href=\"https://oag.ca.gov/news/press-releases/attorney-general-bonta-epic-v-apple-decision-win-california-law-protecting\">she will rule\u003c/a> against powerful tech companies when she determines the law demands it.\u003c/p>\n\u003cp>Also, the documents already unsealed suggest that what gets said in that Oakland courtroom may reveal a lot more about how Silicon Valley’s AI elite actually operates than anything previously said or posted in public.\u003c/p>\n\u003cp>“How much is OpenAI worth? Most of \u003ca href=\"https://www.reuters.com/business/openai-lays-groundwork-juggernaut-ipo-up-1-trillion-valuation-2025-10-29/\">$1 trillion\u003c/a>?” Bullock said. “There are ways that you could unscramble this omelet, but it would be extremely difficult, and it would be a massive headache for everyone involved.” He anticipates that whoever ends up on the losing end of this case will appeal.\u003c/p>\n\u003cp>[ad floatright]\u003c/p>\n",
"blocks": [],
"excerpt": "Two Silicon Valley titans, Elon Musk and Sam Altman, face off in court starting Monday in a case that claims Altman and others enriched themselves by allegedly betraying OpenAI’s founding mission.",
"status": "publish",
"parent": 0,
"modified": 1777313556,
"stats": {
"hasAudio": false,
"hasVideo": false,
"hasChartOrMap": false,
"iframeSrcs": [],
"hasGoogleForm": false,
"hasGallery": false,
"hasHearkenModule": false,
"hasPolis": false,
"paragraphCount": 35,
"wordCount": 1943
},
"headData": {
"title": "How to Unscramble an Omelet in Silicon Valley: The Musk v. Altman Trial That Will Try | KQED",
"description": "Two Silicon Valley titans, Elon Musk and Sam Altman, face off in court starting Monday in a case that claims Altman and others enriched themselves by allegedly betraying OpenAI’s founding mission.",
"ogTitle": "",
"ogDescription": "",
"ogImgId": "",
"twTitle": "",
"twDescription": "",
"twImgId": "",
"schema": {
"@context": "https://schema.org",
"@type": "NewsArticle",
"headline": "How to Unscramble an Omelet in Silicon Valley: The Musk v. Altman Trial That Will Try",
"datePublished": "2026-04-27T04:00:33-07:00",
"dateModified": "2026-04-27T11:12:36-07:00",
"image": "https://cdn.kqed.org/wp-content/uploads/2020/02/KQED-OG-Image@1x.png",
"isAccessibleForFree": "True",
"publisher": {
"@type": "NewsMediaOrganization",
"@id": "https://www.kqed.org/#organization",
"name": "KQED",
"logo": "https://cdn.kqed.org/wp-content/uploads/2020/02/KQED-OG-Image@1x.png",
"url": "https://www.kqed.org",
"sameAs": [
"https://www.facebook.com/KQED",
"https://twitter.com/KQED",
"https://www.instagram.com/kqed/",
"https://www.tiktok.com/@kqedofficial",
"https://www.linkedin.com/company/kqed",
"https://www.youtube.com/channel/UCeC0IOo7i1P_61zVUWbJ4nw"
]
}
}
},
"primaryCategory": {
"termId": 6188,
"slug": "law-and-justice",
"name": "Law and Justice"
},
"audioUrl": "https://traffic.omny.fm/d/clips/0af137ef-751e-4b19-a055-aaef00d2d578/ffca7e9f-6831-41c5-bcaf-aaef00f5a073/a372dc1c-fe90-423e-b5c6-b439011129f7/audio.mp3",
"sticky": false,
"nprStoryId": "kqed-12081290",
"templateType": "standard",
"featuredImageType": "standard",
"excludeFromSiteSearch": "Include",
"articleAge": "0",
"path": "/news/12081290/how-to-unscramble-an-omelet-in-silicon-valley-the-musk-v-altman-trial-that-will-try",
"audioTrackLength": null,
"parsedContent": [
{
"type": "contentString",
"content": "\u003cdiv class=\"post-body\">\u003cp>\u003cp>Starting Monday in Oakland, a federal judge will consider \u003ca href=\"https://www.kqed.org/forum/2010101912956/its-elon-musks-world-were-just-living-in-it\">Elon Musk\u003c/a>’s claim that Sam Altman and OpenAI abandoned their founding promise to develop AI for the \u003ca href=\"https://www.kqed.org/news/12034916/about-benefiting-humanity-calls-grow-for-openai-to-make-good-on-its-promises\">benefit of humanity\u003c/a>, rather than solely for profit. At stake is not just $134 billion in potential damages, but whether it matters, legally speaking, that one of the most powerful AI companies in the world was built on a lie.\u003c/p>\n\u003cp>Musk and Altman co-founded OpenAI in 2015 as a nonprofit research lab, along with Greg Brockman, an AI researcher and entrepreneur, and others prominent in the field, but Musk left the company after a bitter falling out in 2018.\u003c/p>\n\u003cp>The following year, OpenAI established its first for-profit subsidiary, with investor returns capped at 100 times their investment. This structure would eventually evolve into the nearly trillion-dollar public benefit corporation OpenAI became in 2025. A public benefit corporation is essentially a for-profit company with a mission statement it’s legally required to consider, but not necessarily to prioritize.\u003c/p>\n\u003cp>\u003c/p>\u003c/div>",
"attributes": {
"named": {},
"numeric": []
}
},
{
"type": "component",
"content": "",
"name": "ad",
"attributes": {
"named": {
"label": "fullwidth"
},
"numeric": [
"fullwidth"
]
}
},
{
"type": "contentString",
"content": "\u003cdiv class=\"post-body\">\u003cp>\u003c/p>\n\u003cp>This\u003ca href=\"https://www.courtlistener.com/docket/69013420/musk-v-altman/\"> lawsuit\u003c/a>, filed in 2024, originally alleged that Altman and Brockman ran a ‘long con,’ conspiring to enrich themselves at Musk’s expense.\u003c/p>\n\u003cp>On the eve of trial, in a move OpenAI called “evasive,” Musk’s lawyers voluntarily dismissed those personal fraud claims. What proceeds to trial today are two claims that go beyond Musk’s personal grievance: unjust enrichment and breach of charitable trust — essentially, the argument that OpenAI betrayed, not just Musk, but the public it promised to serve.\u003c/p>\n\u003cp>OpenAI argues Musk was fully aware the research lab needed to evolve beyond its nonprofit structure, because he participated in those early discussions, and even proposed folding OpenAI into Tesla. Now, OpenAI’s lawyers argue, Musk is disingenuously trying to use the courts to kneecap the most prominent rival to his own weaker and more controversial AI venture, xAI.\u003c/p>\n\u003cfigure id=\"attachment_12075430\" class=\"wp-caption aligncenter\" style=\"max-width: 2000px\">\u003cimg loading=\"lazy\" decoding=\"async\" class=\"size-full wp-image-12075430\" src=\"https://cdn.kqed.org/wp-content/uploads/sites/10/2026/03/260304-Elon-Musk-Trial-03-KQED.jpg\" alt=\"\" width=\"2000\" height=\"1333\" srcset=\"https://cdn.kqed.org/wp-content/uploads/sites/10/2026/03/260304-Elon-Musk-Trial-03-KQED.jpg 2000w, https://cdn.kqed.org/wp-content/uploads/sites/10/2026/03/260304-Elon-Musk-Trial-03-KQED-160x107.jpg 160w, https://cdn.kqed.org/wp-content/uploads/sites/10/2026/03/260304-Elon-Musk-Trial-03-KQED-1536x1024.jpg 1536w\" sizes=\"auto, (max-width: 2000px) 100vw, 2000px\">\u003cfigcaption class=\"wp-caption-text\">A courtroom sketch depicts Elon Musk on the stand on March 4, 2026. 
\u003ccite>(Vicki Behringer for KQED)\u003c/cite>\u003c/figcaption>\u003c/figure>\n\u003cp>“Motivated by jealousy, regret for walking away from OpenAI and a desire to derail a competing AI company, Elon has spent years harassing OpenAI through baseless lawsuits and public attacks,” the company\u003ca href=\"https://openai.com/index/openai-elon-musk/\"> posted\u003c/a> on its website, where it also offers a\u003ca href=\"https://openai.com/index/elon-musk-wanted-an-openai-for-profit/\"> timeline\u003c/a> that Musk v. Altman et al case watchers will find helpful as they follow what promises to be a barnburner of a trial.\u003c/p>\n\u003cp>\u003ca href=\"https://www.courtlistener.com/docket/69013420/musk-v-altman/?page=3\">Hundreds of court filings\u003c/a> provide a dishy treasure trove of private communications worthy of a telenovela, including some juicy excerpts from Brockman’s personal journal.\u003c/p>\n\u003cp>He writes about Musk, “it’d be wrong to steal the nonprofit from him. … that’d be pretty morally bankrupt. and he’s really not an idiot.”\u003c/p>\u003c/div>",
"attributes": {
"named": {},
"numeric": []
}
},
{
"type": "component",
"content": "",
"name": "aside",
"attributes": {
"named": {
"postid": "news_12072425",
"hero": "https://cdn.kqed.org/wp-content/uploads/sites/10/2024/05/AP24134775174210-1020x680.jpg",
"label": ""
},
"numeric": []
}
},
{
"type": "contentString",
"content": "\u003cdiv class=\"post-body\">\u003cp>Also, “Financially, what will take me to $1B?”\u003c/p>\n\u003cp>But without a doubt, it is the beef between Musk and Altman that will dominate this show. “They really do not like each other. That part is not fake,” said Charlie Bullock, a senior research fellow at the nonprofit Institute for Law and AI who advises state and federal policy makers on AI governance topics.\u003c/p>\n\u003cp>This trial promises to put on lurid public display a mini-universe of incestuous business relationships between men famous for rewriting rules rather than following them.\u003c/p>\n\u003cp>Personal spite between Musk and Altman aside, Bullock said, “We’re going to learn a lot over the course of this case and from the conclusion of this case about whether the legal system can meaningfully constrain frontier AI labs.”\u003c/p>\n\u003cp>This trial, Bullock told KQED, is “sort of the fallback option” in the absence of other checks on bad behavior in the AI space, such as federal regulation.\u003c/p>\n\u003cp>There is, for instance, a well-established law in California about nonprofits, for-profits, and how transitions between the two should be regulated. Whether and how it applies in this case is up to U.S. District Judge Yvonne Gonzalez Rogers in Oakland to determine over the next month.\u003c/p>\n\u003ch2>OpenAI is like nothing that’s come before\u003c/h2>\n\u003cp>Jill Horwitz, a law professor at Northwestern University and faculty director of the Lowell Milken Center for Philanthropy and Nonprofits at UCLA Law, likens OpenAI’s unique structure to “An enormous tail on a tiny dog.”\u003c/p>\n\u003cp>“The tail is the operating company, which is what everybody thinks of as being OpenAI, and the dog is the nonprofit, and it’s tiny. 
And it remains to be seen whether that board can be independent enough, because there’s such overlap between the nonprofit board and the for-profit board,” Horwitz said.\u003c/p>\n\u003cfigure id=\"attachment_12054564\" class=\"wp-caption aligncenter\" style=\"max-width: 2000px\">\u003cimg loading=\"lazy\" decoding=\"async\" class=\"wp-image-12054564 size-full\" src=\"https://cdn.kqed.org/wp-content/uploads/sites/10/2025/09/Sam-Altman_chatpgt.jpg\" alt=\"\" width=\"2000\" height=\"1333\" srcset=\"https://cdn.kqed.org/wp-content/uploads/sites/10/2025/09/Sam-Altman_chatpgt.jpg 2000w, https://cdn.kqed.org/wp-content/uploads/sites/10/2025/09/Sam-Altman_chatpgt-160x107.jpg 160w, https://cdn.kqed.org/wp-content/uploads/sites/10/2025/09/Sam-Altman_chatpgt-1536x1024.jpg 1536w\" sizes=\"auto, (max-width: 2000px) 100vw, 2000px\">\u003cfigcaption class=\"wp-caption-text\">Samuel Altman, CEO of OpenAI, testifies before the Senate Judiciary Subcommittee on Privacy, Technology and the Law on May 16, 2023, in Washington, D.C. \u003ccite>(Win McNamee/Getty Images)\u003c/cite>\u003c/figcaption>\u003c/figure>\n\u003cp>“It’s a weird structure. OpenAI isn’t one company. OpenAI is an interconnected group of companies. But it all is supposed to be advancing the nonprofit purpose,” Horwitz told KQED.\u003c/p>\n\u003cp>In 2018, even as OpenAI was privately contemplating the for-profit restructuring, it voluntarily adopted a new charter that restated and even strengthened its commitment to the public mission articulated at its founding.\u003c/p>\n\u003cp>In part, this had to do with the pressure Altman and OpenAI felt to attract top AI researchers, many of whom are concerned about the ethics of unleashing world-changing software on the rest of us. 
In 2024, 13 current and former OpenAI and Google DeepMind employees took the extraordinary step of publishing an \u003ca href=\"https://righttowarn.ai\">open letter\u003c/a> titled “Right to Warn,” calling out their own industry, and asking for protection if they warned the public.\u003c/p>\u003c/div>",
"attributes": {
"named": {},
"numeric": []
}
},
{
"type": "component",
"content": "",
"name": "aside",
"attributes": {
"named": {
"postid": "news_12079267",
"hero": "https://cdn.kqed.org/wp-content/uploads/sites/10/2026/04/Hegseth-Side-by-Side-c.jpg",
"label": ""
},
"numeric": []
}
},
{
"type": "contentString",
"content": "\u003cdiv class=\"post-body\">\u003cp>“We are hopeful that these risks can be adequately mitigated with sufficient guidance from the scientific community, policymakers, and the public. However, AI companies have strong financial incentives to avoid effective oversight, and we do not believe bespoke structures of corporate governance are sufficient to change this.”\u003c/p>\n\u003cp>To this day, it remains unclear whether Altman’s talk about benefiting humanity was anything more than a savvy sales pitch designed to attract top AI talent and allay the concerns of \u003ca href=\"https://www.kqed.org/news/11976097/california-lawmakers-take-on-ai-regulation-with-a-host-of-bills\">federal regulators\u003c/a>. This is one of the key questions trial watchers will be most keen to see answered.\u003c/p>\n\u003cp>“It’s quite typical for scientific research organizations to do all the hard work of the research before their IP is sold to a for-profit company for practical purposes,” said Rose Chan Loui, founding executive director of the Lowell Milken Center for Philanthropy and Nonprofits at UCLA Law.\u003c/p>\n\u003cp>What makes OpenAI unusual, Chan Loui said, is how explicitly and repeatedly the AI developer bound itself to promising its AI would be developed safely and for the benefit of all of humanity. “When they opened up to investment and formed the subsidiary, they recommitted to that purpose. They tied themselves even more tightly.”\u003c/p>\n\u003cp>Anthropic, founded by former OpenAI employees who left over concerns about the company’s direction, has cultivated a reputation as the more safety-conscious, ethically serious player in the AI race, the light gray hat to OpenAI’s dark gray one. Anthropic chose to incorporate as a public benefit corporation from the beginning, rather than a nonprofit, because a public benefit corporation has far more legal flexibility. 
“Anthropic may be behaving in a way that the public thinks is more charitable, but its legal duties to do so are a lot lower than OpenAI’s,” Horwitz said.\u003c/p>\n\u003ch2>But is Musk the right party to bring this suit?\u003c/h2>\n\u003cp>For legal eagles following this case, it’s curious that Musk is the plaintiff, rather than California’s attorney general, who is the primary legal guardian of charitable assets in the state, where most of OpenAI’s assets are located. But in 2025, Attorney General Rob Bonta negotiated a binding \u003ca href=\"https://oag.ca.gov/system/files/attachments/press-docs/Final%20Executed%20MOU%20Between%20OpenAI%20and%20California%20AG%20re%20Notice%20of%20Conditions%20of%20Non-Objection%20%2810.27.2025%29%20%28Signed%20by%20OpenAI%29%20%28Signed%20by%20CA%20DOJ%29.pdf\">memorandum of understanding\u003c/a> with OpenAI. The AG in Delaware, where OpenAI is incorporated, issued a parallel statement of non-objection.\u003c/p>\n\u003cp>A coalition of more than 30 California foundations and nonprofit organizations, including the San Francisco Foundation and TechEquity, \u003ca href=\"https://www.sff.org/Offsite-Media/Charitable-coalition-letter-on-OpenAI-conversion-1-29-25.pdf\">urged Bonta\u003c/a> to take immediate legal action to protect OpenAI’s charitable assets, arguing his office had both the authority and the responsibility to do so.\u003c/p>\n\u003cfigure id=\"attachment_12063671\" class=\"wp-caption aligncenter\" style=\"max-width: 2000px\">\u003cimg loading=\"lazy\" decoding=\"async\" class=\"size-full wp-image-12063671\" src=\"https://cdn.kqed.org/wp-content/uploads/sites/10/2025/11/RobBontaAP.jpg\" alt=\"\" width=\"2000\" height=\"1333\" srcset=\"https://cdn.kqed.org/wp-content/uploads/sites/10/2025/11/RobBontaAP.jpg 2000w, https://cdn.kqed.org/wp-content/uploads/sites/10/2025/11/RobBontaAP-160x107.jpg 160w, https://cdn.kqed.org/wp-content/uploads/sites/10/2025/11/RobBontaAP-1536x1024.jpg 1536w\" sizes=\"auto, (max-width: 2000px) 
100vw, 2000px\">\u003cfigcaption class=\"wp-caption-text\">California Attorney General Rob Bonta speaks to reporters as Arizona Attorney General Kris Mayes, left, and Oregon Attorney General Dan Rayfield, right, listen outside the Supreme Court on Wednesday, Nov. 5, 2025, in Washington, D.C. \u003ccite>(Mark Schiefelbein/AP Photo)\u003c/cite>\u003c/figcaption>\u003c/figure>\n\u003cp>\u003ca href=\"https://www.kqed.org/news/12034916/about-benefiting-humanity-calls-grow-for-openai-to-make-good-on-its-promises\">More than 50 organizations\u003c/a> also petitioned Bonta to halt OpenAI’s for-profit conversion until he calculated the full market value of OpenAI’s nonprofit assets, estimated at the time at up to $300 billion, and directed OpenAI to transfer that value to independent nonprofit entities.\u003c/p>\n\u003cp>“It’s not too late for the Attorney General to revisit his agreement with OpenAI,” wrote Catherine Bracy, founder and CEO of TechEquity, an Oakland-based tech accountability organization. “The evidence this trial unearths, especially how OpenAI violated its original charitable mission in pursuit of profit, will likely leave him no choice.”\u003c/p>\n\u003cp>Chan Loui is among those scratching her head over a basic question: why does Musk get to bring this case at all? “He’s a competitor,” she said.\u003c/p>\n\u003cp>A personal fraud claim, that Altman lied to him to get his money, might have given Musk the clearest standing as an injured party. But Musk voluntarily dismissed those claims late last week. What remains rests almost entirely on a public interest argument, one that California’s attorney general, not a billionaire with a rival AI company of his own, would typically make. \u003c/p>\u003c/div>",
"attributes": {
"named": {},
"numeric": []
}
},
{
"type": "component",
"content": "",
"name": "aside",
"attributes": {
"named": {
"postid": "news_12079896",
"hero": "https://cdn.kqed.org/wp-content/uploads/sites/10/2026/04/Daniel-Moreno-Gama-AP.jpg",
"label": ""
},
"numeric": []
}
},
{
"type": "contentString",
"content": "\u003cdiv class=\"post-body\">\u003cp>Chan Loui worries about what it would mean if Judge Gonzalez Rogers effectively threw out that hard-won agreement between the attorneys general and OpenAI, essentially substituting a billionaire rival’s lawsuit for the state’s own regulatory process, whatever its deficiencies.\u003c/p>\n\u003cp>“You don’t want just anyone, any donor to complain,” Chan Loui said. “We have all this litigation against charities.” She said she sympathizes with those who want OpenAI to recommit as fully as possible to its original ethos, but she worries about what legal precedents this case could set for everybody else.\u003c/p>\n\u003cp>What’s not in dispute is that this trial will be a riveting spectacle for Silicon Valley, which will be watching this case with a mix of curiosity and fear. Judge Gonzalez Rogers has already proven \u003ca href=\"https://oag.ca.gov/news/press-releases/attorney-general-bonta-epic-v-apple-decision-win-california-law-protecting\">she will rule\u003c/a> against powerful tech companies when she determines the law demands it.\u003c/p>\n\u003cp>Also, the documents already unsealed suggest that what gets said in that Oakland courtroom may reveal a lot more about how Silicon Valley’s AI elite actually operates than anything previously said or posted in public.\u003c/p>\n\u003cp>“How much is OpenAI worth? Most of \u003ca href=\"https://www.reuters.com/business/openai-lays-groundwork-juggernaut-ipo-up-1-trillion-valuation-2025-10-29/\">$1 trillion\u003c/a>?” Bullock said. “There are ways that you could unscramble this omelet, but it would be extremely difficult, and it would be a massive headache for everyone involved.” He anticipates that whoever ends up on the losing end of this case will appeal.\u003c/p>\n\u003cp>\u003c/p>\u003c/div>",
"attributes": {
"named": {},
"numeric": []
}
},
{
"type": "component",
"content": "",
"name": "ad",
"attributes": {
"named": {
"label": "floatright"
},
"numeric": [
"floatright"
]
}
},
{
"type": "contentString",
"content": "\u003cdiv class=\"post-body\">\u003cp>\u003c/p>\n\u003c/div>",
"attributes": {
"named": {},
"numeric": []
}
}
],
"link": "/news/12081290/how-to-unscramble-an-omelet-in-silicon-valley-the-musk-v-altman-trial-that-will-try",
"authors": [
"251"
],
"categories": [
"news_6188",
"news_8",
"news_248"
],
"tags": [
"news_34755",
"news_1386",
"news_18538",
"news_3897",
"news_27626",
"news_23052",
"news_19954",
"news_34054",
"news_33542",
"news_33543",
"news_38",
"news_34586",
"news_1631"
],
"featImg": "news_12080929",
"label": "news"
}
},
"programsReducer": {
"all-things-considered": {
"id": "all-things-considered",
"title": "All Things Considered",
"info": "Every weekday, \u003cem>All Things Considered\u003c/em> hosts Robert Siegel, Audie Cornish, Ari Shapiro, and Kelly McEvers present the program's trademark mix of news, interviews, commentaries, reviews, and offbeat features. Michel Martin hosts on the weekends.",
"airtime": "MON-FRI 1pm-2pm, 4:30pm-6:30pm\u003cbr />SAT-SUN 5pm-6pm",
"imageSrc": "https://cdn.kqed.org/wp-content/uploads/2024/04/All-Things-Considered-Podcast-Tile-360x360-1.jpg",
"officialWebsiteLink": "https://www.npr.org/programs/all-things-considered/",
"meta": {
"site": "news",
"source": "npr"
},
"link": "/radio/program/all-things-considered"
},
"american-suburb-podcast": {
"id": "american-suburb-podcast",
"title": "American Suburb: The Podcast",
"tagline": "The flip side of gentrification, told through one town",
"info": "Gentrification is changing cities across America, forcing people from neighborhoods they have long called home. Call them the displaced. Now those priced out of the Bay Area are looking for a better life in an unlikely place. American Suburb follows this migration to one California town along the Delta, 45 miles from San Francisco. But is this once sleepy suburb ready for them?",
"imageSrc": "https://cdn.kqed.org/wp-content/uploads/2024/04/American-Suburb-Podcast-Tile-703x703-1.jpg",
"officialWebsiteLink": "/news/series/american-suburb-podcast",
"meta": {
"site": "news",
"source": "kqed",
"order": 19
},
"link": "/news/series/american-suburb-podcast/",
"subscribe": {
"npr": "https://rpb3r.app.goo.gl/RBrW",
"apple": "https://itunes.apple.com/WebObjects/MZStore.woa/wa/viewPodcast?mt=2&id=1287748328",
"tuneIn": "https://tunein.com/radio/American-Suburb-p1086805/",
"rss": "https://ww2.kqed.org/news/series/american-suburb-podcast/feed/podcast",
"google": "https://podcasts.google.com/feed/aHR0cHM6Ly9mZWVkcy5tZWdhcGhvbmUuZm0vS1FJTkMzMDExODgxNjA5"
}
},
"baycurious": {
"id": "baycurious",
"title": "Bay Curious",
"tagline": "Exploring the Bay Area, one question at a time",
"info": "KQED’s new podcast, Bay Curious, gets to the bottom of the mysteries — both profound and peculiar — that give the Bay Area its unique identity. And we’ll do it with your help! You ask the questions. You decide what Bay Curious investigates. And you join us on the journey to find the answers.",
"imageSrc": "https://cdn.kqed.org/wp-content/uploads/2024/04/Bay-Curious-Podcast-Tile-703x703-1.jpg",
"imageAlt": "KQED Bay Curious",
"officialWebsiteLink": "/news/series/baycurious",
"meta": {
"site": "news",
"source": "kqed",
"order": 3
},
"link": "/podcasts/baycurious",
"subscribe": {
"apple": "https://podcasts.apple.com/us/podcast/bay-curious/id1172473406",
"npr": "https://www.npr.org/podcasts/500557090/bay-curious",
"rss": "https://ww2.kqed.org/news/category/bay-curious-podcast/feed/podcast",
"amazon": "https://music.amazon.com/podcasts/9a90d476-aa04-455d-9a4c-0871ed6216d4/bay-curious",
"stitcher": "https://www.stitcher.com/podcast/kqed/bay-curious",
"spotify": "https://open.spotify.com/show/6O76IdmhixfijmhTZLIJ8k"
}
},
"bbc-world-service": {
"id": "bbc-world-service",
"title": "BBC World Service",
"info": "The day's top stories from BBC News compiled twice daily in the week, once at weekends.",
"airtime": "MON-FRI 9pm-10pm, TUE-FRI 1am-2am",
"imageSrc": "https://cdn.kqed.org/wp-content/uploads/2024/04/BBC-World-Service-Podcast-Tile-360x360-1.jpg",
"officialWebsiteLink": "https://www.bbc.co.uk/sounds/play/live:bbc_world_service",
"meta": {
"site": "news",
"source": "BBC World Service"
},
"link": "/radio/program/bbc-world-service",
"subscribe": {
"apple": "https://itunes.apple.com/us/podcast/global-news-podcast/id135067274?mt=2",
"tuneIn": "https://tunein.com/radio/BBC-World-Service-p455581/",
"rss": "https://podcasts.files.bbci.co.uk/p02nq0gn.rss"
}
},
"californiareport": {
"id": "californiareport",
"title": "The California Report",
"tagline": "California, day by day",
"info": "KQED’s statewide radio news program providing daily coverage of issues, trends and public policy decisions.",
"imageSrc": "https://cdn.kqed.org/wp-content/uploads/2024/04/The-California-Report-Podcast-Tile-703x703-1.jpg",
"imageAlt": "KQED The California Report",
"officialWebsiteLink": "/californiareport",
"meta": {
"site": "news",
"source": "kqed",
"order": 8
},
"link": "/californiareport",
"subscribe": {
"apple": "https://podcasts.apple.com/us/podcast/kqeds-the-california-report/id79681292",
"amazon": "https://music.amazon.com/podcasts/26099305-72af-4542-9dde-ac1807fe36d5/kqed-s-the-california-report",
"npr": "https://www.npr.org/podcasts/432285393/the-california-report",
"stitcher": "https://www.stitcher.com/podcast/kqedfm-kqeds-the-california-report-podcast-8838",
"rss": "https://ww2.kqed.org/news/tag/tcram/feed/podcast"
}
},
"californiareportmagazine": {
"id": "californiareportmagazine",
"title": "The California Report Magazine",
"tagline": "Your state, your stories",
"info": "Every week, The California Report Magazine takes you on a road trip for the ears: to visit the places and meet the people who make California unique. The in-depth storytelling podcast from the California Report.",
"airtime": "FRI 4:30pm-5pm, 6:30pm-7pm, 11pm-11:30pm",
"imageSrc": "https://cdn.kqed.org/wp-content/uploads/2024/04/The-California-Report-Magazine-Podcast-Tile-703x703-1.jpg",
"imageAlt": "KQED The California Report Magazine",
"officialWebsiteLink": "/californiareportmagazine",
"meta": {
"site": "news",
"source": "kqed",
"order": 10
},
"link": "/californiareportmagazine",
"subscribe": {
"apple": "https://podcasts.apple.com/us/podcast/the-california-report-magazine/id1314750545",
"google": "https://podcasts.google.com/feed/aHR0cHM6Ly9mZWVkcy5tZWdhcGhvbmUuZm0vS1FJTkM3NjkwNjk1OTAz",
"npr": "https://www.npr.org/podcasts/564733126/the-california-report-magazine",
"stitcher": "https://www.stitcher.com/podcast/kqed/the-california-report-magazine",
"rss": "https://ww2.kqed.org/news/tag/tcrmag/feed/podcast"
}
},
"city-arts": {
"id": "city-arts",
"title": "City Arts & Lectures",
"info": "A one-hour radio program to hear celebrated writers, artists and thinkers address contemporary ideas and values, often discussing the creative process. Please note: tapes or transcripts are not available",
"imageSrc": "https://ww2.kqed.org/radio/wp-content/uploads/sites/50/2018/05/cityartsandlecture-300x300.jpg",
"officialWebsiteLink": "https://www.cityarts.net/",
"airtime": "SUN 1pm-2pm, TUE 10pm, WED 1am",
"meta": {
"site": "news",
"source": "City Arts & Lectures"
},
"link": "https://www.cityarts.net",
"subscribe": {
"tuneIn": "https://tunein.com/radio/City-Arts-and-Lectures-p692/",
"rss": "https://www.cityarts.net/feed/"
}
},
"closealltabs": {
"id": "closealltabs",
"title": "Close All Tabs",
"tagline": "Your irreverent guide to the trends redefining our world",
"info": "Close All Tabs breaks down how digital culture shapes our world through thoughtful insights and irreverent humor.",
"imageSrc": "https://cdn.kqed.org/wp-content/uploads/2025/02/CAT_2_Tile-scaled.jpg",
"imageAlt": "KQED Close All Tabs",
"officialWebsiteLink": "/podcasts/closealltabs",
"meta": {
"site": "news",
"source": "kqed",
"order": 1
},
"link": "/podcasts/closealltabs",
"subscribe": {
"apple": "https://podcasts.apple.com/us/podcast/close-all-tabs/id214663465",
"rss": "https://feeds.megaphone.fm/KQINC6993880386",
"amazon": "https://music.amazon.com/podcasts/92d9d4ac-67a3-4eed-b10a-fb45d45b1ef2/close-all-tabs",
"spotify": "https://open.spotify.com/show/6LAJFHnGK1pYXYzv6SIol6?si=deb0cae19813417c"
}
},
"code-switch-life-kit": {
"id": "code-switch-life-kit",
"title": "Code Switch / Life Kit",
"info": "\u003cem>Code Switch\u003c/em>, which listeners will hear in the first part of the hour, has fearless and much-needed conversations about race. Hosted by journalists of color, the show tackles the subject of race head-on, exploring how it impacts every part of society — from politics and pop culture to history, sports and more.\u003cbr />\u003cbr />\u003cem>Life Kit\u003c/em>, which will be in the second part of the hour, guides you through spaces and feelings no one prepares you for — from finances to mental health, from workplace microaggressions to imposter syndrome, from relationships to parenting. The show features experts with real world experience and shares their knowledge. Because everyone needs a little help being human.\u003cbr />\u003cbr />\u003ca href=\"https://www.npr.org/podcasts/510312/codeswitch\">\u003cem>Code Switch\u003c/em> official site and podcast\u003c/a>\u003cbr />\u003ca href=\"https://www.npr.org/lifekit\">\u003cem>Life Kit\u003c/em> official site and podcast\u003c/a>\u003cbr />",
"airtime": "SUN 9pm-10pm",
"imageSrc": "https://cdn.kqed.org/wp-content/uploads/2024/04/Code-Switch-Life-Kit-Podcast-Tile-360x360-1.jpg",
"meta": {
"site": "radio",
"source": "npr"
},
"link": "/radio/program/code-switch-life-kit",
"subscribe": {
"apple": "https://podcasts.apple.com/podcast/1112190608?mt=2&at=11l79Y&ct=nprdirectory",
"google": "https://podcasts.google.com/feed/aHR0cHM6Ly93d3cubnByLm9yZy9yc3MvcG9kY2FzdC5waHA_aWQ9NTEwMzEy",
"spotify": "https://open.spotify.com/show/3bExJ9JQpkwNhoHvaIIuyV",
"rss": "https://feeds.npr.org/510312/podcast.xml"
}
},
"commonwealth-club": {
"id": "commonwealth-club",
"title": "Commonwealth Club of California Podcast",
"info": "The Commonwealth Club of California is the nation's oldest and largest public affairs forum. As a non-partisan forum, The Club brings to the public airwaves diverse viewpoints on important topics. The Club's weekly radio broadcast - the oldest in the U.S., dating back to 1924 - is carried across the nation on public radio stations and is now podcasting. Our website archive features audio of our recent programs, as well as selected speeches from our long and distinguished history. This podcast feed is usually updated twice a week and is always un-edited.",
"airtime": "THU 10pm, FRI 1am",
"imageSrc": "https://cdn.kqed.org/wp-content/uploads/2024/04/Commonwealth-Club-Podcast-Tile-360x360-1.jpg",
"officialWebsiteLink": "https://www.commonwealthclub.org/podcasts",
"meta": {
"site": "news",
"source": "Commonwealth Club of California"
},
"link": "/radio/program/commonwealth-club",
"subscribe": {
"apple": "https://itunes.apple.com/us/podcast/commonwealth-club-of-california-podcast/id976334034?mt=2",
"google": "https://podcasts.google.com/feed/aHR0cDovL3d3dy5jb21tb253ZWFsdGhjbHViLm9yZy9hdWRpby9wb2RjYXN0L3dlZWtseS54bWw",
"tuneIn": "https://tunein.com/radio/Commonwealth-Club-of-California-p1060/"
}
},
"forum": {
"id": "forum",
"title": "Forum",
"tagline": "The conversation starts here",
"info": "KQED’s live call-in program discussing local, state, national and international issues, as well as in-depth interviews.",
"airtime": "MON-FRI 9am-11am, 10pm-11pm",
"imageSrc": "https://cdn.kqed.org/wp-content/uploads/2024/04/Forum-Podcast-Tile-703x703-1.jpg",
"imageAlt": "KQED Forum with Mina Kim and Alexis Madrigal",
"officialWebsiteLink": "/forum",
"meta": {
"site": "news",
"source": "kqed",
"order": 9
},
"link": "/forum",
"subscribe": {
"apple": "https://podcasts.apple.com/us/podcast/kqeds-forum/id73329719",
"google": "https://podcasts.google.com/feed/aHR0cHM6Ly9mZWVkcy5tZWdhcGhvbmUuZm0vS1FJTkM5NTU3MzgxNjMz",
"npr": "https://www.npr.org/podcasts/432307980/forum",
"stitcher": "https://www.stitcher.com/podcast/kqedfm-kqeds-forum-podcast",
"rss": "https://feeds.megaphone.fm/KQINC9557381633"
}
},
"freakonomics-radio": {
"id": "freakonomics-radio",
"title": "Freakonomics Radio",
"info": "Freakonomics Radio is a one-hour award-winning podcast and public-radio project hosted by Stephen Dubner, with co-author Steve Levitt as a regular guest. It is produced in partnership with WNYC.",
"imageSrc": "https://ww2.kqed.org/news/wp-content/uploads/sites/10/2018/05/freakonomicsRadio.png",
"officialWebsiteLink": "http://freakonomics.com/",
"airtime": "SUN 1am-2am, SAT 3pm-4pm",
"meta": {
"site": "radio",
"source": "WNYC"
},
"link": "/radio/program/freakonomics-radio",
"subscribe": {
"npr": "https://rpb3r.app.goo.gl/4s8b",
"apple": "https://itunes.apple.com/us/podcast/freakonomics-radio/id354668519",
"tuneIn": "https://tunein.com/podcasts/WNYC-Podcasts/Freakonomics-Radio-p272293/",
"rss": "https://feeds.feedburner.com/freakonomicsradio"
}
},
"fresh-air": {
"id": "fresh-air",
"title": "Fresh Air",
"info": "Hosted by Terry Gross, \u003cem>Fresh Air from WHYY\u003c/em> is the Peabody Award-winning weekday magazine of contemporary arts and issues. One of public radio's most popular programs, Fresh Air features intimate conversations with today's biggest luminaries.",
"airtime": "MON-FRI 7pm-8pm",
"imageSrc": "https://cdn.kqed.org/wp-content/uploads/2024/04/Fresh-Air-Podcast-Tile-360x360-1.jpg",
"officialWebsiteLink": "https://www.npr.org/programs/fresh-air/",
"meta": {
"site": "radio",
"source": "npr"
},
"link": "/radio/program/fresh-air",
"subscribe": {
"npr": "https://rpb3r.app.goo.gl/4s8b",
"apple": "https://itunes.apple.com/WebObjects/MZStore.woa/wa/viewPodcast?s=143441&mt=2&id=214089682&at=11l79Y&ct=nprdirectory",
"tuneIn": "https://tunein.com/radio/Fresh-Air-p17/",
"rss": "https://feeds.npr.org/381444908/podcast.xml"
}
},
"here-and-now": {
"id": "here-and-now",
"title": "Here & Now",
"info": "A live production of NPR and WBUR Boston, in collaboration with stations across the country, Here & Now reflects the fluid world of news as it's happening in the middle of the day, with timely, in-depth news, interviews and conversation. Hosted by Robin Young, Jeremy Hobson and Tonya Mosley.",
"airtime": "MON-THU 11am-12pm",
"imageSrc": "https://cdn.kqed.org/wp-content/uploads/2024/04/Here-And-Now-Podcast-Tile-360x360-1.jpg",
"officialWebsiteLink": "http://www.wbur.org/hereandnow",
"meta": {
"site": "news",
"source": "npr"
},
"link": "/radio/program/here-and-now",
"subscribe": {
"apple": "https://itunes.apple.com/WebObjects/MZStore.woa/wa/viewPodcast?mt=2&id=426698661",
"tuneIn": "https://tunein.com/radio/Here--Now-p211/",
"rss": "https://feeds.npr.org/510051/podcast.xml"
}
},
"hidden-brain": {
"id": "hidden-brain",
"title": "Hidden Brain",
"info": "Shankar Vedantam uses science and storytelling to reveal the unconscious patterns that drive human behavior, shape our choices and direct our relationships.",
"imageSrc": "https://ww2.kqed.org/radio/wp-content/uploads/sites/50/2018/05/hiddenbrain.jpg",
"officialWebsiteLink": "https://www.npr.org/series/423302056/hidden-brain",
"airtime": "SUN 7pm-8pm",
"meta": {
"site": "news",
"source": "NPR"
},
"link": "/radio/program/hidden-brain",
"subscribe": {
"apple": "https://itunes.apple.com/us/podcast/hidden-brain/id1028908750?mt=2",
"tuneIn": "https://tunein.com/podcasts/Science-Podcasts/Hidden-Brain-p787503/",
"rss": "https://feeds.npr.org/510308/podcast.xml"
}
},
"how-i-built-this": {
"id": "how-i-built-this",
"title": "How I Built This with Guy Raz",
"info": "Guy Raz dives into the stories behind some of the world's best known companies. How I Built This weaves a narrative journey about innovators, entrepreneurs and idealists—and the movements they built.",
"imageSrc": "https://ww2.kqed.org/news/wp-content/uploads/sites/10/2018/05/howIBuiltThis.png",
"officialWebsiteLink": "https://www.npr.org/podcasts/510313/how-i-built-this",
"airtime": "SUN 7:30pm-8pm",
"meta": {
"site": "news",
"source": "npr"
},
"link": "/radio/program/how-i-built-this",
"subscribe": {
"npr": "https://rpb3r.app.goo.gl/3zxy",
"apple": "https://itunes.apple.com/us/podcast/how-i-built-this-with-guy-raz/id1150510297?mt=2",
"tuneIn": "https://tunein.com/podcasts/Arts--Culture-Podcasts/How-I-Built-This-p910896/",
"rss": "https://feeds.npr.org/510313/podcast.xml"
}
},
"hyphenacion": {
"id": "hyphenacion",
"title": "Hyphenación",
"tagline": "Where conversation and cultura meet",
"info": "What kind of no sabo word is Hyphenación? For us, it’s about living within a hyphenation. Like being a third-gen Mexican-American from the Texas border now living that Bay Area Chicano life. Like Xorje! Each week we bring together a couple of hyphenated Latinos to talk all about personal life choices: family, careers, relationships, belonging … everything is on the table. ",
"imageSrc": "https://cdn.kqed.org/wp-content/uploads/2025/03/Hyphenacion_FinalAssets_PodcastTile.png",
"imageAlt": "KQED Hyphenación",
"officialWebsiteLink": "/podcasts/hyphenacion",
"meta": {
"site": "news",
"source": "kqed",
"order": 15
},
"link": "/podcasts/hyphenacion",
"subscribe": {
"apple": "https://podcasts.apple.com/us/podcast/hyphenaci%C3%B3n/id1191591838",
"spotify": "https://open.spotify.com/show/2p3Fifq96nw9BPcmFdIq0o?si=39209f7b25774f38",
"youtube": "https://www.youtube.com/c/kqedarts",
"amazon": "https://music.amazon.com/podcasts/6c3dd23c-93fb-4aab-97ba-1725fa6315f1/hyphenaci%C3%B3n",
"rss": "https://feeds.megaphone.fm/KQINC2275451163"
}
},
"jerrybrown": {
"id": "jerrybrown",
"title": "The Political Mind of Jerry Brown",
"tagline": "Lessons from a lifetime in politics",
"info": "The Political Mind of Jerry Brown brings listeners the wisdom of the former Governor, Mayor, and presidential candidate. Scott Shafer interviewed Brown for more than 40 hours, covering the former governor's life and half-century in the political game and Brown has some lessons he'd like to share. ",
"imageSrc": "https://cdn.kqed.org/wp-content/uploads/2024/04/The-Political-Mind-of-Jerry-Brown-Podcast-Tile-703x703-1.jpg",
"imageAlt": "KQED The Political Mind of Jerry Brown",
"officialWebsiteLink": "/podcasts/jerrybrown",
"meta": {
"site": "news",
"source": "kqed",
"order": 18
},
"link": "/podcasts/jerrybrown",
"subscribe": {
"npr": "https://www.npr.org/podcasts/790253322/the-political-mind-of-jerry-brown",
"apple": "https://itunes.apple.com/us/podcast/id1492194549",
"rss": "https://ww2.kqed.org/news/series/jerrybrown/feed/podcast/",
"tuneIn": "http://tun.in/pjGcK",
"stitcher": "https://www.stitcher.com/podcast/kqed/the-political-mind-of-jerry-brown",
"spotify": "https://open.spotify.com/show/54C1dmuyFyKMFttY6X2j6r?si=K8SgRCoISNK6ZbjpXrX5-w",
"amazon": "https://music.amazon.com/podcasts/44420f75-3b0e-4301-ab3b-16da6b09e543/the-political-mind-of-jerry-brown"
}
},
"latino-usa": {
"id": "latino-usa",
"title": "Latino USA",
"airtime": "MON 1am-2am, SUN 6pm-7pm",
"info": "Latino USA, the radio journal of news and culture, is the only national, English-language radio program produced from a Latino perspective.",
"imageSrc": "https://ww2.kqed.org/radio/wp-content/uploads/sites/50/2018/04/latinoUsa.jpg",
"officialWebsiteLink": "http://latinousa.org/",
"meta": {
"site": "news",
"source": "npr"
},
"link": "/radio/program/latino-usa",
"subscribe": {
"npr": "https://rpb3r.app.goo.gl/xtTd",
"apple": "https://itunes.apple.com/WebObjects/MZStore.woa/wa/viewPodcast?s=143441&mt=2&id=79681317&at=11l79Y&ct=nprdirectory",
"tuneIn": "https://tunein.com/radio/Latino-USA-p621/",
"rss": "https://feeds.npr.org/510016/podcast.xml"
}
},
"marketplace": {
"id": "marketplace",
"title": "Marketplace",
"info": "Our flagship program, helmed by Kai Ryssdal, examines what the day in money delivered, through stories, conversations, newsworthy numbers and more. Updated Monday through Friday at about 3:30 p.m. PT.",
"airtime": "MON-FRI 4pm-4:30pm, MON-WED 6:30pm-7pm",
"imageSrc": "https://cdn.kqed.org/wp-content/uploads/2024/04/Marketplace-Podcast-Tile-360x360-1.jpg",
"officialWebsiteLink": "https://www.marketplace.org/",
"meta": {
"site": "news",
"source": "American Public Media"
},
"link": "/radio/program/marketplace",
"subscribe": {
"apple": "https://itunes.apple.com/WebObjects/MZStore.woa/wa/viewPodcast?s=143441&mt=2&id=201853034&at=11l79Y&ct=nprdirectory",
"tuneIn": "https://tunein.com/radio/APM-Marketplace-p88/",
"rss": "https://feeds.publicradio.org/public_feeds/marketplace-pm/rss/rss"
}
},
"masters-of-scale": {
"id": "masters-of-scale",
"title": "Masters of Scale",
"info": "Masters of Scale is an original podcast in which LinkedIn co-founder and Greylock Partner Reid Hoffman sets out to describe and prove theories that explain how great entrepreneurs take their companies from zero to a gazillion in ingenious fashion.",
"airtime": "Every other Wednesday June 12 through October 16 at 8pm (repeats Thursdays at 2am)",
"imageSrc": "https://cdn.kqed.org/wp-content/uploads/2024/04/Masters-of-Scale-Podcast-Tile-360x360-1.jpg",
"officialWebsiteLink": "https://mastersofscale.com/",
"meta": {
"site": "radio",
"source": "WaitWhat"
},
"link": "/radio/program/masters-of-scale",
"subscribe": {
"apple": "http://mastersofscale.app.link/",
"rss": "https://rss.art19.com/masters-of-scale"
}
},
"mindshift": {
"id": "mindshift",
"title": "MindShift",
"tagline": "A podcast about the future of learning and how we raise our kids",
"info": "The MindShift podcast explores the innovations in education that are shaping how kids learn. Hosts Ki Sung and Katrina Schwartz introduce listeners to educators, researchers, parents and students who are developing effective ways to improve how kids learn. We cover topics like how fed-up administrators are developing surprising tactics to deal with classroom disruptions; how listening to podcasts are helping kids develop reading skills; the consequences of overparenting; and why interdisciplinary learning can engage students on all ends of the traditional achievement spectrum. This podcast is part of the MindShift education site, a division of KQED News. KQED is an NPR/PBS member station based in San Francisco. You can also visit the MindShift website for episodes and supplemental blog posts or tweet us \u003ca href=\"https://twitter.com/MindShiftKQED\">@MindShiftKQED\u003c/a> or visit us at \u003ca href=\"/mindshift\">MindShift.KQED.org\u003c/a>",
"imageSrc": "https://cdn.kqed.org/wp-content/uploads/2024/04/Mindshift-Podcast-Tile-703x703-1.jpg",
"imageAlt": "KQED MindShift: How We Will Learn",
"officialWebsiteLink": "/mindshift/",
"meta": {
"site": "news",
"source": "kqed",
"order": 12
},
"link": "/podcasts/mindshift",
"subscribe": {
"apple": "https://podcasts.apple.com/us/podcast/mindshift-podcast/id1078765985",
"google": "https://podcasts.google.com/feed/aHR0cHM6Ly9mZWVkcy5tZWdhcGhvbmUuZm0vS1FJTkM1NzY0NjAwNDI5",
"npr": "https://www.npr.org/podcasts/464615685/mind-shift-podcast",
"stitcher": "https://www.stitcher.com/podcast/kqed/stories-teachers-share",
"spotify": "https://open.spotify.com/show/0MxSpNYZKNprFLCl7eEtyx"
}
},
"morning-edition": {
"id": "morning-edition",
"title": "Morning Edition",
"info": "\u003cem>Morning Edition\u003c/em> takes listeners around the country and the world with multi-faceted stories and commentaries every weekday. Hosts Steve Inskeep, David Greene and Rachel Martin bring you the latest breaking news and features to prepare you for the day.",
"airtime": "MON-FRI 3am-9am",
"imageSrc": "https://cdn.kqed.org/wp-content/uploads/2024/04/Morning-Edition-Podcast-Tile-360x360-1.jpg",
"officialWebsiteLink": "https://www.npr.org/programs/morning-edition/",
"meta": {
"site": "news",
"source": "npr"
},
"link": "/radio/program/morning-edition"
},
"onourwatch": {
"id": "onourwatch",
"title": "On Our Watch",
"tagline": "Deeply-reported investigative journalism",
"info": "For decades, the process for how police police themselves has been inconsistent – if not opaque. In some states, like California, these proceedings were completely hidden. After a new police transparency law unsealed scores of internal affairs files, our reporters set out to examine these cases and the shadow world of police discipline. On Our Watch brings listeners into the rooms where officers are questioned and witnesses are interrogated to find out who this system is really protecting. Is it the officers, or the public they've sworn to serve?",
"imageSrc": "https://cdn.kqed.org/wp-content/uploads/2024/04/On-Our-Watch-Podcast-Tile-703x703-1.jpg",
"imageAlt": "On Our Watch from NPR and KQED",
"officialWebsiteLink": "/podcasts/onourwatch",
"meta": {
"site": "news",
"source": "kqed",
"order": 11
},
"link": "/podcasts/onourwatch",
"subscribe": {
"apple": "https://podcasts.apple.com/podcast/id1567098962",
"google": "https://podcasts.google.com/feed/aHR0cHM6Ly9mZWVkcy5ucHIub3JnLzUxMDM2MC9wb2RjYXN0LnhtbD9zYz1nb29nbGVwb2RjYXN0cw",
"npr": "https://rpb3r.app.goo.gl/onourwatch",
"spotify": "https://open.spotify.com/show/0OLWoyizopu6tY1XiuX70x",
"tuneIn": "https://tunein.com/radio/On-Our-Watch-p1436229/",
"stitcher": "https://www.stitcher.com/show/on-our-watch",
"rss": "https://feeds.npr.org/510360/podcast.xml"
}
},
"on-the-media": {
"id": "on-the-media",
"title": "On The Media",
"info": "Our weekly podcast explores how the media 'sausage' is made, casts an incisive eye on fluctuations in the marketplace of ideas, and examines threats to the freedom of information and expression in America and abroad. For one hour a week, the show tries to lift the veil from the process of \"making media,\" especially news media, because it's through that lens that we see the world and the world sees us",
"airtime": "SUN 2pm-3pm, MON 12am-1am",
"imageSrc": "https://ww2.kqed.org/radio/wp-content/uploads/sites/50/2018/04/onTheMedia.png",
"officialWebsiteLink": "https://www.wnycstudios.org/shows/otm",
"meta": {
"site": "news",
"source": "wnyc"
},
"link": "/radio/program/on-the-media",
"subscribe": {
"apple": "https://itunes.apple.com/us/podcast/on-the-media/id73330715?mt=2",
"tuneIn": "https://tunein.com/radio/On-the-Media-p69/",
"rss": "http://feeds.wnyc.org/onthemedia"
}
},
"pbs-newshour": {
"id": "pbs-newshour",
"title": "PBS NewsHour",
"info": "Analysis, background reports and updates from the PBS NewsHour putting today's news in context.",
"airtime": "MON-FRI 3pm-4pm",
"imageSrc": "https://cdn.kqed.org/wp-content/uploads/2024/04/PBS-News-Hour-Podcast-Tile-360x360-1.jpg",
"officialWebsiteLink": "https://www.pbs.org/newshour/",
"meta": {
"site": "news",
"source": "pbs"
},
"link": "/radio/program/pbs-newshour",
"subscribe": {
"apple": "https://itunes.apple.com/us/podcast/pbs-newshour-full-show/id394432287?mt=2",
"tuneIn": "https://tunein.com/radio/PBS-NewsHour---Full-Show-p425698/",
"rss": "https://www.pbs.org/newshour/feeds/rss/podcasts/show"
}
},
"perspectives": {
"id": "perspectives",
"title": "Perspectives",
"tagline": "KQED's series of daily listener commentaries since 1991",
"info": "KQED's series of daily listener commentaries since 1991.",
"imageSrc": "https://cdn.kqed.org/wp-content/uploads/2025/01/Perspectives_Tile_Final.jpg",
"imageAlt": "KQED Perspectives",
"officialWebsiteLink": "/perspectives/",
"meta": {
"site": "radio",
"source": "kqed",
"order": 14
},
"link": "/perspectives",
"subscribe": {
"apple": "https://podcasts.apple.com/us/podcast/id73801135",
"npr": "https://www.npr.org/podcasts/432309616/perspectives",
"rss": "https://ww2.kqed.org/perspectives/category/perspectives/feed/",
"google": "https://podcasts.google.com/feed/aHR0cHM6Ly93dzIua3FlZC5vcmcvcGVyc3BlY3RpdmVzL2NhdGVnb3J5L3BlcnNwZWN0aXZlcy9mZWVkLw"
}
},
"planet-money": {
"id": "planet-money",
"title": "Planet Money",
"info": "The economy explained. Imagine you could call up a friend and say, Meet me at the bar and tell me what's going on with the economy. Now imagine that's actually a fun evening.",
"airtime": "SUN 3pm-4pm",
"imageSrc": "https://ww2.kqed.org/radio/wp-content/uploads/sites/50/2018/04/planetmoney.jpg",
"officialWebsiteLink": "https://www.npr.org/sections/money/",
"meta": {
"site": "news",
"source": "npr"
},
"link": "/radio/program/planet-money",
"subscribe": {
"npr": "https://rpb3r.app.goo.gl/M4f5",
"apple": "https://itunes.apple.com/us/podcast/planet-money/id290783428?mt=2",
"tuneIn": "https://tunein.com/podcasts/Business--Economics-Podcasts/Planet-Money-p164680/",
"rss": "https://feeds.npr.org/510289/podcast.xml"
}
},
"politicalbreakdown": {
"id": "politicalbreakdown",
"title": "Political Breakdown",
"tagline": "Politics from a personal perspective",
"info": "Political Breakdown is a new series that explores the political intersection of California and the nation. Each week hosts Scott Shafer and Marisa Lagos are joined with a new special guest to unpack politics -- with personality — and offer an insider’s glimpse at how politics happens.",
"airtime": "THU 6:30pm-7pm",
"imageSrc": "https://cdn.kqed.org/wp-content/uploads/2024/04/Political-Breakdown-2024-Podcast-Tile-703x703-1.jpg",
"imageAlt": "KQED Political Breakdown",
"officialWebsiteLink": "/podcasts/politicalbreakdown",
"meta": {
"site": "radio",
"source": "kqed",
"order": 5
},
"link": "/podcasts/politicalbreakdown",
"subscribe": {
"apple": "https://podcasts.apple.com/us/podcast/political-breakdown/id1327641087",
"amazon": "https://music.amazon.com/podcasts/e0c2d153-ad36-4c8d-901d-f1da6a724824/political-breakdown",
"npr": "https://www.npr.org/podcasts/572155894/political-breakdown",
"stitcher": "https://www.stitcher.com/podcast/kqed/political-breakdown",
"spotify": "https://open.spotify.com/show/07RVyIjIdk2WDuVehvBMoN",
"rss": "https://ww2.kqed.org/news/tag/political-breakdown/feed/podcast"
}
},
"possible": {
"id": "possible",
"title": "Possible",
"info": "Possible is hosted by entrepreneur Reid Hoffman and writer Aria Finger. Together in Possible, Hoffman and Finger lead enlightening discussions about building a brighter collective future. The show features interviews with visionary guests like Trevor Noah, Sam Altman and Janette Sadik-Khan. Possible paints an optimistic portrait of the world we can create through science, policy, business, art and our shared humanity. It asks: What if everything goes right for once? How can we get there? Each episode also includes a short fiction story generated by advanced AI GPT-4, serving as a thought-provoking springboard to speculate how humanity could leverage technology for good.",
"airtime": "SUN 2pm",
"imageSrc": "https://cdn.kqed.org/wp-content/uploads/2024/04/Possible-Podcast-Tile-360x360-1.jpg",
"officialWebsiteLink": "https://www.possible.fm/",
"meta": {
"site": "news",
"source": "Possible"
},
"link": "/radio/program/possible",
"subscribe": {
"apple": "https://podcasts.apple.com/us/podcast/possible/id1677184070",
"spotify": "https://open.spotify.com/show/730YpdUSNlMyPQwNnyjp4k"
}
},
"pri-the-world": {
"id": "pri-the-world",
"title": "PRI's The World: Latest Edition",
"info": "Each weekday, host Marco Werman and his team of producers bring you the world's most interesting stories in an hour of radio that reminds us just how small our planet really is.",
"airtime": "MON-FRI 2pm-3pm",
"imageSrc": "https://cdn.kqed.org/wp-content/uploads/2024/04/The-World-Podcast-Tile-360x360-1.jpg",
"officialWebsiteLink": "https://www.pri.org/programs/the-world",
"meta": {
"site": "news",
"source": "PRI"
},
"link": "/radio/program/pri-the-world",
"subscribe": {
"apple": "https://itunes.apple.com/us/podcast/pris-the-world-latest-edition/id278196007?mt=2",
"tuneIn": "https://tunein.com/podcasts/News--Politics-Podcasts/PRIs-The-World-p24/",
"rss": "http://feeds.feedburner.com/pri/theworld"
}
},
"radiolab": {
"id": "radiolab",
"title": "Radiolab",
"info": "A two-time Peabody Award-winner, Radiolab is an investigation told through sounds and stories, and centered around one big idea. In the Radiolab world, information sounds like music and science and culture collide. Hosted by Jad Abumrad and Robert Krulwich, the show is designed for listeners who demand skepticism, but appreciate wonder. WNYC Studios is the producer of other leading podcasts including Freakonomics Radio, Death, Sex & Money, On the Media and many more.",
"airtime": "SUN 12am-1am, SAT 2pm-3pm",
"imageSrc": "https://ww2.kqed.org/radio/wp-content/uploads/sites/50/2018/04/radiolab1400.png",
"officialWebsiteLink": "https://www.wnycstudios.org/shows/radiolab/",
"meta": {
"site": "science",
"source": "WNYC"
},
"link": "/radio/program/radiolab",
"subscribe": {
"apple": "https://itunes.apple.com/us/podcast/radiolab/id152249110?mt=2",
"tuneIn": "https://tunein.com/radio/RadioLab-p68032/",
"rss": "https://feeds.wnyc.org/radiolab"
}
},
"reveal": {
"id": "reveal",
"title": "Reveal",
"info": "Created by The Center for Investigative Reporting and PRX, Reveal is public radios first one-hour weekly radio show and podcast dedicated to investigative reporting. Credible, fact based and without a partisan agenda, Reveal combines the power and artistry of driveway moment storytelling with data-rich reporting on critically important issues. The result is stories that inform and inspire, arming our listeners with information to right injustices, hold the powerful accountable and improve lives.Reveal is hosted by Al Letson and showcases the award-winning work of CIR and newsrooms large and small across the nation. In a radio and podcast market crowded with choices, Reveal focuses on important and often surprising stories that illuminate the world for our listeners.",
"airtime": "SAT 4pm-5pm",
"imageSrc": "https://ww2.kqed.org/radio/wp-content/uploads/sites/50/2018/04/reveal300px.png",
"officialWebsiteLink": "https://www.revealnews.org/episodes/",
"meta": {
"site": "news",
"source": "npr"
},
"link": "/radio/program/reveal",
"subscribe": {
"apple": "https://itunes.apple.com/us/podcast/reveal/id886009669",
"tuneIn": "https://tunein.com/radio/Reveal-p679597/",
"rss": "http://feeds.revealradio.org/revealpodcast"
}
},
"rightnowish": {
"id": "rightnowish",
"title": "Rightnowish",
"tagline": "Art is where you find it",
"info": "Rightnowish digs into life in the Bay Area right now… ish. Journalist Pendarvis Harshaw takes us to galleries painted on the sides of liquor stores in West Oakland. We'll dance in warehouses in the Bayview, make smoothies with kids in South Berkeley, and listen to classical music in a 1984 Cutlass Supreme in Richmond. Every week, Pen talks to movers and shakers about how the Bay Area shapes what they create, and how they shape the place we call home.",
"imageSrc": "https://cdn.kqed.org/wp-content/uploads/2024/04/Rightnowish-Podcast-Tile-500x500-1.jpg",
"imageAlt": "KQED Rightnowish with Pendarvis Harshaw",
"officialWebsiteLink": "/podcasts/rightnowish",
"meta": {
"site": "arts",
"source": "kqed",
"order": 16
},
"link": "/podcasts/rightnowish",
"subscribe": {
"npr": "https://www.npr.org/podcasts/721590300/rightnowish",
"rss": "https://ww2.kqed.org/arts/programs/rightnowish/feed/podcast",
"apple": "https://podcasts.apple.com/us/podcast/rightnowish/id1482187648",
"stitcher": "https://www.stitcher.com/podcast/kqed/rightnowish",
"google": "https://podcasts.google.com/feed/aHR0cHM6Ly9mZWVkcy5tZWdhcGhvbmUuZm0vS1FJTkMxMjU5MTY3NDc4",
"spotify": "https://open.spotify.com/show/7kEJuafTzTVan7B78ttz1I"
}
},
"science-friday": {
"id": "science-friday",
"title": "Science Friday",
"info": "Science Friday is a weekly science talk show, broadcast live over public radio stations nationwide. Each week, the show focuses on science topics that are in the news and tries to bring an educated, balanced discussion to bear on the scientific issues at hand. Panels of expert guests join host Ira Flatow, a veteran science journalist, to discuss science and to take questions from listeners during the call-in portion of the program.",
"airtime": "FRI 11am-1pm",
"imageSrc": "https://cdn.kqed.org/wp-content/uploads/2024/04/Science-Friday-Podcast-Tile-360x360-1.jpg",
"officialWebsiteLink": "https://www.wnycstudios.org/shows/science-friday",
"meta": {
"site": "news",
"source": "npr"
},
"link": "/radio/program/science-friday",
"subscribe": {
"apple": "https://itunes.apple.com/WebObjects/MZStore.woa/wa/viewPodcast?s=143441&mt=2&id=73329284&at=11l79Y&ct=nprdirectory",
"tuneIn": "https://tunein.com/radio/Science-Friday-p394/",
"rss": "http://feeds.wnyc.org/science-friday"
}
},
"snap-judgment": {
"id": "snap-judgment",
"title": "Snap Judgment",
"tagline": "Real stories with killer beats",
"info": "The Snap Judgment radio show and podcast mixes real stories with killer beats to produce cinematic, dramatic radio. Snap's musical brand of storytelling dares listeners to see the world through the eyes of another. This is storytelling... with a BEAT!! Snap first aired on public radio stations nationwide in July 2010. Today, Snap Judgment airs on over 450 public radio stations and is brought to the airwaves by KQED & PRX.",
"airtime": "SAT 1pm-2pm, 9pm-10pm",
"imageSrc": "https://cdn.kqed.org/wp-content/uploads/2024/05/Snap-Judgment-Podcast-Tile-703x703-1.jpg",
"imageAlt": "KQED Snap Judgment",
"officialWebsiteLink": "https://snapjudgment.org",
"meta": {
"site": "arts",
"source": "kqed",
"order": 4
},
"link": "https://snapjudgment.org",
"subscribe": {
"apple": "https://podcasts.apple.com/us/podcast/snap-judgment/id283657561",
"npr": "https://www.npr.org/podcasts/449018144/snap-judgment",
"stitcher": "https://www.pandora.com/podcast/snap-judgment/PC:241?source=stitcher-sunset",
"spotify": "https://open.spotify.com/show/3Cct7ZWmxHNAtLgBTqjC5v",
"rss": "https://snap.feed.snapjudgment.org/"
}
},
"soldout": {
"id": "soldout",
"title": "SOLD OUT: Rethinking Housing in America",
"tagline": "A new future for housing",
"info": "Sold Out: Rethinking Housing in America",
"imageSrc": "https://cdn.kqed.org/wp-content/uploads/2024/04/Sold-Out-Podcast-Tile-703x703-1.jpg",
"imageAlt": "KQED Sold Out: Rethinking Housing in America",
"officialWebsiteLink": "/podcasts/soldout",
"meta": {
"site": "news",
"source": "kqed",
"order": 13
},
"link": "/podcasts/soldout",
"subscribe": {
"npr": "https://www.npr.org/podcasts/911586047/s-o-l-d-o-u-t-a-new-future-for-housing",
"apple": "https://podcasts.apple.com/us/podcast/introducing-sold-out-rethinking-housing-in-america/id1531354937",
"rss": "https://feeds.megaphone.fm/soldout",
"spotify": "https://open.spotify.com/show/38dTBSk2ISFoPiyYNoKn1X",
"stitcher": "https://www.stitcher.com/podcast/kqed/sold-out-rethinking-housing-in-america",
"tunein": "https://tunein.com/radio/SOLD-OUT-Rethinking-Housing-in-America-p1365871/"
}
},
"spooked": {
"id": "spooked",
"title": "Spooked",
"tagline": "True-life supernatural stories",
"info": "",
"imageSrc": "https://cdn.kqed.org/wp-content/uploads/2024/10/Spooked-Podcast-Tile-703x703-1.jpg",
"imageAlt": "KQED Spooked",
"officialWebsiteLink": "https://spookedpodcast.org/",
"meta": {
"site": "news",
"source": "kqed",
"order": 7
},
"link": "https://spookedpodcast.org/",
"subscribe": {
"apple": "https://podcasts.apple.com/us/podcast/spooked/id1279361017",
"npr": "https://www.npr.org/podcasts/549547848/snap-judgment-presents-spooked",
"spotify": "https://open.spotify.com/show/76571Rfl3m7PLJQZKQIGCT",
"rss": "https://feeds.simplecast.com/TBotaapn"
}
},
"tech-nation": {
"id": "tech-nation",
"title": "Tech Nation Radio Podcast",
"info": "Tech Nation is a weekly public radio program, hosted by Dr. Moira Gunn. Founded in 1993, it has grown from a simple interview show to a multi-faceted production, featuring conversations with noted technology and science leaders, and a weekly science and technology-related commentary.",
"airtime": "FRI 10pm",
"imageSrc": "https://cdn.kqed.org/wp-content/uploads/2024/04/Tech-Nation-Radio-Podcast-Tile-360x360-1.jpg",
"officialWebsiteLink": "http://technation.podomatic.com/",
"meta": {
"site": "science",
"source": "Tech Nation Media"
},
"link": "/radio/program/tech-nation",
"subscribe": {
"rss": "https://technation.podomatic.com/rss2.xml"
}
},
"ted-radio-hour": {
"id": "ted-radio-hour",
"title": "TED Radio Hour",
"info": "The TED Radio Hour is a journey through fascinating ideas, astonishing inventions, fresh approaches to old problems, and new ways to think and create.",
"airtime": "SUN 3pm-4pm, SAT 10pm-11pm",
"imageSrc": "https://ww2.kqed.org/radio/wp-content/uploads/sites/50/2018/04/tedRadioHour.jpg",
"officialWebsiteLink": "https://www.npr.org/programs/ted-radio-hour/?showDate=2018-06-22",
"meta": {
"site": "news",
"source": "npr"
},
"link": "/radio/program/ted-radio-hour",
"subscribe": {
"npr": "https://rpb3r.app.goo.gl/8vsS",
"apple": "https://itunes.apple.com/WebObjects/MZStore.woa/wa/viewPodcast?s=143441&mt=2&id=523121474&at=11l79Y&ct=nprdirectory",
"tuneIn": "https://tunein.com/radio/TED-Radio-Hour-p418021/",
"rss": "https://feeds.npr.org/510298/podcast.xml"
}
},
"thebay": {
"id": "thebay",
"title": "The Bay",
"tagline": "Local news to keep you rooted",
"info": "Host Devin Katayama walks you through the biggest story of the day with reporters and newsmakers.",
"imageSrc": "https://cdn.kqed.org/wp-content/uploads/2024/04/The-Bay-Podcast-Tile-703x703-1.jpg",
"imageAlt": "KQED The Bay",
"officialWebsiteLink": "/podcasts/thebay",
"meta": {
"site": "radio",
"source": "kqed",
"order": 2
},
"link": "/podcasts/thebay",
"subscribe": {
"apple": "https://podcasts.apple.com/us/podcast/the-bay/id1350043452",
"amazon": "https://music.amazon.com/podcasts/d800ea4c-7a2c-42f2-b861-edaf78a5db0b/the-bay",
"npr": "https://www.npr.org/podcasts/586725995/the-bay",
"stitcher": "https://www.stitcher.com/podcast/kqed/the-bay",
"spotify": "https://open.spotify.com/show/4BIKBKIujizLHlIlBNaAqQ",
"rss": "https://feeds.megaphone.fm/KQINC8259786327"
}
},
"thelatest": {
"id": "thelatest",
"title": "The Latest",
"tagline": "Trusted local news in real time",
"info": "",
"imageSrc": "https://cdn.kqed.org/wp-content/uploads/2025/05/The-Latest-2025-Podcast-Tile-703x703-1.jpg",
"imageAlt": "KQED The Latest",
"officialWebsiteLink": "/thelatest",
"meta": {
"site": "news",
"source": "kqed",
"order": 6
},
"link": "/thelatest",
"subscribe": {
"apple": "https://podcasts.apple.com/us/podcast/the-latest-from-kqed/id1197721799",
"npr": "https://www.npr.org/podcasts/1257949365/the-latest-from-k-q-e-d",
"spotify": "https://open.spotify.com/show/5KIIXMgM9GTi5AepwOYvIZ?si=bd3053fec7244dba",
"rss": "https://feeds.megaphone.fm/KQINC9137121918"
}
},
"theleap": {
"id": "theleap",
"title": "The Leap",
"tagline": "What if you closed your eyes, and jumped?",
"info": "Stories about people making dramatic, risky changes, told by award-winning public radio reporter Judy Campbell.",
"imageSrc": "https://cdn.kqed.org/wp-content/uploads/2024/04/The-Leap-Podcast-Tile-703x703-1.jpg",
"imageAlt": "KQED The Leap",
"officialWebsiteLink": "/podcasts/theleap",
"meta": {
"site": "news",
"source": "kqed",
"order": 17
},
"link": "/podcasts/theleap",
"subscribe": {
"apple": "https://podcasts.apple.com/us/podcast/the-leap/id1046668171",
"npr": "https://www.npr.org/podcasts/447248267/the-leap",
"stitcher": "https://www.stitcher.com/podcast/kqed/the-leap",
"spotify": "https://open.spotify.com/show/3sSlVHHzU0ytLwuGs1SD1U",
"rss": "https://ww2.kqed.org/news/programs/the-leap/feed/podcast"
}
},
"the-moth-radio-hour": {
"id": "the-moth-radio-hour",
"title": "The Moth Radio Hour",
"info": "Since its launch in 1997, The Moth has presented thousands of true stories, told live and without notes, to standing-room-only crowds worldwide. Moth storytellers stand alone, under a spotlight, with only a microphone and a roomful of strangers. The storyteller and the audience embark on a high-wire act of shared experience which is both terrifying and exhilarating. Since 2008, The Moth podcast has featured many of our favorite stories told live on Moth stages around the country. For information on all of our programs and live events, visit themoth.org.",
"airtime": "SAT 8pm-9pm and SUN 11am-12pm",
"imageSrc": "https://ww2.kqed.org/radio/wp-content/uploads/sites/50/2018/04/theMoth.jpg",
"officialWebsiteLink": "https://themoth.org/",
"meta": {
"site": "arts",
"source": "prx"
},
"link": "/radio/program/the-moth-radio-hour",
"subscribe": {
"apple": "https://itunes.apple.com/us/podcast/the-moth-podcast/id275699983?mt=2",
"tuneIn": "https://tunein.com/radio/The-Moth-p273888/",
"rss": "http://feeds.themoth.org/themothpodcast"
}
},
"the-new-yorker-radio-hour": {
"id": "the-new-yorker-radio-hour",
"title": "The New Yorker Radio Hour",
"info": "The New Yorker Radio Hour is a weekly program presented by the magazine's editor, David Remnick, and produced by WNYC Studios and The New Yorker. Each episode features a diverse mix of interviews, profiles, storytelling, and an occasional burst of humor inspired by the magazine, and shaped by its writers, artists, and editors. This isn't a radio version of a magazine, but something all its own, reflecting the rich possibilities of audio storytelling and conversation. Theme music for the show was composed and performed by Merrill Garbus of tUnE-YArDs.",
"airtime": "SAT 10am-11am",
"imageSrc": "https://cdn.kqed.org/wp-content/uploads/2024/04/The-New-Yorker-Podcast-Tile-360x360-1.jpg",
"officialWebsiteLink": "https://www.wnycstudios.org/shows/tnyradiohour",
"meta": {
"site": "arts",
"source": "WNYC"
},
"link": "/radio/program/the-new-yorker-radio-hour",
"subscribe": {
"apple": "https://itunes.apple.com/us/podcast/id1050430296",
"tuneIn": "https://tunein.com/podcasts/WNYC-Podcasts/New-Yorker-Radio-Hour-p803804/",
"rss": "https://feeds.feedburner.com/newyorkerradiohour"
}
},
"the-sam-sanders-show": {
"id": "the-sam-sanders-show",
"title": "The Sam Sanders Show",
"info": "One of public radio's most dynamic voices, Sam Sanders helped launch The NPR Politics Podcast and hosted NPR's hit show It's Been A Minute. Now, the award-winning host returns with something brand new, The Sam Sanders Show. Every week, Sam Sanders and friends dig into the culture that shapes our lives: what's driving the biggest trends, how artists really think, and even the memes you can't stop scrolling past. Sam is beloved for his way of unpacking the world and bringing you up close to fresh currents and engaging conversations. The Sam Sanders Show is smart, funny and always a good time.",
"airtime": "FRI 12-1pm AND SAT 11am-12pm",
"imageSrc": "https://cdn.kqed.org/wp-content/uploads/2025/11/The-Sam-Sanders-Show-Podcast-Tile-400x400-1.jpg",
"officialWebsiteLink": "https://www.kcrw.com/shows/the-sam-sanders-show/latest",
"meta": {
"site": "arts",
"source": "KCRW"
},
"link": "https://www.kcrw.com/shows/the-sam-sanders-show/latest",
"subscribe": {
"rss": "https://feed.cdnstream1.com/zjb/feed/download/ac/28/59/ac28594c-e1d0-4231-8728-61865cdc80e8.xml"
}
},
"the-splendid-table": {
"id": "the-splendid-table",
"title": "The Splendid Table",
"info": "\u003cem>The Splendid Table\u003c/em> hosts our nation's conversations about cooking, sustainability and food culture.",
"imageSrc": "https://cdn.kqed.org/wp-content/uploads/2024/04/The-Splendid-Table-Podcast-Tile-360x360-1.jpg",
"officialWebsiteLink": "https://www.splendidtable.org/",
"airtime": "SUN 10-11 pm",
"meta": {
"site": "radio",
"source": "npr"
},
"link": "/radio/program/the-splendid-table"
},
"this-american-life": {
"id": "this-american-life",
"title": "This American Life",
"info": "This American Life is a weekly public radio show, heard by 2.2 million people on more than 500 stations. Another 2.5 million people download the weekly podcast. It is hosted by Ira Glass, produced in collaboration with Chicago Public Media, delivered to stations by PRX The Public Radio Exchange, and has won all of the major broadcasting awards.",
"airtime": "SAT 12pm-1pm, 7pm-8pm",
"imageSrc": "https://ww2.kqed.org/radio/wp-content/uploads/sites/50/2018/04/thisAmericanLife.png",
"officialWebsiteLink": "https://www.thisamericanlife.org/",
"meta": {
"site": "news",
"source": "wbez"
},
"link": "/radio/program/this-american-life",
"subscribe": {
"apple": "https://itunes.apple.com/WebObjects/MZStore.woa/wa/viewPodcast?s=143441&mt=2&id=201671138&at=11l79Y&ct=nprdirectory",
"rss": "https://www.thisamericanlife.org/podcast/rss.xml"
}
},
"tinydeskradio": {
"id": "tinydeskradio",
"title": "Tiny Desk Radio",
"info": "We're bringing the best of Tiny Desk to the airwaves, only on public radio.",
"airtime": "SUN 8pm and SAT 9pm",
"imageSrc": "https://cdn.kqed.org/wp-content/uploads/2025/04/300x300-For-Member-Station-Logo-Tiny-Desk-Radio-@2x.png",
"officialWebsiteLink": "https://www.npr.org/series/g-s1-52030/tiny-desk-radio",
"meta": {
"site": "news",
"source": "npr"
},
"link": "/radio/program/tinydeskradio",
"subscribe": {
"rss": "https://feeds.npr.org/g-s1-52030/rss.xml"
}
},
"wait-wait-dont-tell-me": {
"id": "wait-wait-dont-tell-me",
"title": "Wait Wait... Don't Tell Me!",
"info": "Peter Sagal and Bill Kurtis host the weekly NPR News quiz show alongside some of the best and brightest news and entertainment personalities.",
"airtime": "SUN 10am-11am, SAT 11am-12pm, SAT 6pm-7pm",
"imageSrc": "https://cdn.kqed.org/wp-content/uploads/2024/04/Wait-Wait-Podcast-Tile-300x300-1.jpg",
"officialWebsiteLink": "https://www.npr.org/programs/wait-wait-dont-tell-me/",
"meta": {
"site": "news",
"source": "npr"
},
"link": "/radio/program/wait-wait-dont-tell-me",
"subscribe": {
"npr": "https://rpb3r.app.goo.gl/Xogv",
"apple": "https://itunes.apple.com/WebObjects/MZStore.woa/wa/viewPodcast?s=143441&mt=2&id=121493804&at=11l79Y&ct=nprdirectory",
"tuneIn": "https://tunein.com/radio/Wait-Wait-Dont-Tell-Me-p46/",
"rss": "https://feeds.npr.org/344098539/podcast.xml"
}
},
"weekend-edition-saturday": {
"id": "weekend-edition-saturday",
"title": "Weekend Edition Saturday",
"info": "Weekend Edition Saturday wraps up the week's news and offers a mix of analysis and features on a wide range of topics, including arts, sports, entertainment, and human interest stories. The two-hour program is hosted by NPR's Peabody Award-winning Scott Simon.",
"airtime": "SAT 5am-10am",
"imageSrc": "https://cdn.kqed.org/wp-content/uploads/2024/04/Weekend-Edition-Podcast-Tile-360x360-1.jpg",
"officialWebsiteLink": "https://www.npr.org/programs/weekend-edition-saturday/",
"meta": {
"site": "news",
"source": "npr"
},
"link": "/radio/program/weekend-edition-saturday"
},
"weekend-edition-sunday": {
"id": "weekend-edition-sunday",
"title": "Weekend Edition Sunday",
"info": "Weekend Edition Sunday features interviews with newsmakers, artists, scientists, politicians, musicians, writers, theologians and historians. The program has covered news events from Nelson Mandela's 1990 release from a South African prison to the capture of Saddam Hussein.",
"airtime": "SUN 5am-10am",
"imageSrc": "https://cdn.kqed.org/wp-content/uploads/2024/04/Weekend-Edition-Podcast-Tile-360x360-1.jpg",
"officialWebsiteLink": "https://www.npr.org/programs/weekend-edition-sunday/",
"meta": {
"site": "news",
"source": "npr"
},
"link": "/radio/program/weekend-edition-sunday"
}
},
"racesReducer": {},
"racesGenElectionReducer": {},
"radioSchedulesReducer": {},
"listsReducer": {
"posts/news?tag=artificial-intelligence": {
"isFetching": false,
"latestQuery": {
"from": 0,
"postsToRender": 9
},
"tag": null,
"vitalsOnly": true,
"totalRequested": 9,
"isLoading": false,
"isLoadingMore": true,
"total": {
"value": 75,
"relation": "eq"
},
"items": [
"news_12083612",
"news_12083428",
"news_12083278",
"news_12083224",
"news_12082428",
"news_12082064",
"news_12081798",
"news_12081603",
"news_12081290"
]
}
},
"recallGuideReducer": {
"intros": {},
"policy": {},
"candidates": {}
},
"savedArticleReducer": {
"articles": [],
"status": {}
},
"pfsSessionReducer": {},
"subscriptionsReducer": {},
"termsReducer": {
"about": {
"name": "About",
"type": "terms",
"id": "about",
"slug": "about",
"link": "/about",
"taxonomy": "site"
},
"arts": {
"name": "Arts & Culture",
"grouping": [
"arts",
"pop",
"trulyca"
],
"description": "KQED Arts provides daily in-depth coverage of the Bay Area's music, art, film, performing arts, literature and arts news, as well as cultural commentary and criticism.",
"type": "terms",
"id": "arts",
"slug": "arts",
"link": "/arts",
"taxonomy": "site"
},
"artschool": {
"name": "Art School",
"parent": "arts",
"type": "terms",
"id": "artschool",
"slug": "artschool",
"link": "/artschool",
"taxonomy": "site"
},
"bayareabites": {
"name": "KQED food",
"grouping": [
"food",
"bayareabites",
"checkplease"
],
"parent": "food",
"type": "terms",
"id": "bayareabites",
"slug": "bayareabites",
"link": "/food",
"taxonomy": "site"
},
"bayareahiphop": {
"name": "Bay Area Hiphop",
"type": "terms",
"id": "bayareahiphop",
"slug": "bayareahiphop",
"link": "/bayareahiphop",
"taxonomy": "site"
},
"campaign21": {
"name": "Campaign 21",
"type": "terms",
"id": "campaign21",
"slug": "campaign21",
"link": "/campaign21",
"taxonomy": "site"
},
"checkplease": {
"name": "KQED food",
"grouping": [
"food",
"bayareabites",
"checkplease"
],
"parent": "food",
"type": "terms",
"id": "checkplease",
"slug": "checkplease",
"link": "/food",
"taxonomy": "site"
},
"education": {
"name": "Education",
"grouping": [
"education"
],
"type": "terms",
"id": "education",
"slug": "education",
"link": "/education",
"taxonomy": "site"
},
"elections": {
"name": "Elections",
"type": "terms",
"id": "elections",
"slug": "elections",
"link": "/elections",
"taxonomy": "site"
},
"events": {
"name": "Events",
"type": "terms",
"id": "events",
"slug": "events",
"link": "/events",
"taxonomy": "site"
},
"event": {
"name": "Event",
"alias": "events",
"type": "terms",
"id": "event",
"slug": "event",
"link": "/event",
"taxonomy": "site"
},
"filmschoolshorts": {
"name": "Film School Shorts",
"type": "terms",
"id": "filmschoolshorts",
"slug": "filmschoolshorts",
"link": "/filmschoolshorts",
"taxonomy": "site"
},
"food": {
"name": "KQED food",
"grouping": [
"food",
"bayareabites",
"checkplease"
],
"type": "terms",
"id": "food",
"slug": "food",
"link": "/food",
"taxonomy": "site"
},
"forum": {
"name": "Forum",
"relatedContentQuery": "posts/forum?",
"parent": "news",
"type": "terms",
"id": "forum",
"slug": "forum",
"link": "/forum",
"taxonomy": "site"
},
"futureofyou": {
"name": "Future of You",
"grouping": [
"science",
"futureofyou"
],
"parent": "science",
"type": "terms",
"id": "futureofyou",
"slug": "futureofyou",
"link": "/futureofyou",
"taxonomy": "site"
},
"jpepinheart": {
"name": "KQED food",
"relatedContentQuery": "posts/food,bayareabites,checkplease",
"parent": "food",
"type": "terms",
"id": "jpepinheart",
"slug": "jpepinheart",
"link": "/food",
"taxonomy": "site"
},
"liveblog": {
"name": "Live Blog",
"type": "terms",
"id": "liveblog",
"slug": "liveblog",
"link": "/liveblog",
"taxonomy": "site"
},
"livetv": {
"name": "Live TV",
"parent": "tv",
"type": "terms",
"id": "livetv",
"slug": "livetv",
"link": "/livetv",
"taxonomy": "site"
},
"lowdown": {
"name": "The Lowdown",
"relatedContentQuery": "posts/lowdown?",
"parent": "news",
"type": "terms",
"id": "lowdown",
"slug": "lowdown",
"link": "/lowdown",
"taxonomy": "site"
},
"mindshift": {
"name": "Mindshift",
"parent": "news",
"description": "MindShift explores the future of education by highlighting the innovative – and sometimes counterintuitive – ways educators and parents are helping all children succeed.",
"type": "terms",
"id": "mindshift",
"slug": "mindshift",
"link": "/mindshift",
"taxonomy": "site"
},
"news": {
"name": "News",
"grouping": [
"news",
"forum"
],
"type": "terms",
"id": "news",
"slug": "news",
"link": "/news",
"taxonomy": "site"
},
"perspectives": {
"name": "Perspectives",
"parent": "radio",
"type": "terms",
"id": "perspectives",
"slug": "perspectives",
"link": "/perspectives",
"taxonomy": "site"
},
"podcasts": {
"name": "Podcasts",
"type": "terms",
"id": "podcasts",
"slug": "podcasts",
"link": "/podcasts",
"taxonomy": "site"
},
"pop": {
"name": "Pop",
"parent": "arts",
"type": "terms",
"id": "pop",
"slug": "pop",
"link": "/pop",
"taxonomy": "site"
},
"pressroom": {
"name": "Pressroom",
"type": "terms",
"id": "pressroom",
"slug": "pressroom",
"link": "/pressroom",
"taxonomy": "site"
},
"quest": {
"name": "Quest",
"parent": "science",
"type": "terms",
"id": "quest",
"slug": "quest",
"link": "/quest",
"taxonomy": "site"
},
"radio": {
"name": "Radio",
"grouping": [
"forum",
"perspectives"
],
"description": "Listen to KQED Public Radio – home of Forum and The California Report – on 88.5 FM in San Francisco, 89.3 FM in Sacramento, 88.3 FM in Santa Rosa and 88.1 FM in Martinez.",
"type": "terms",
"id": "radio",
"slug": "radio",
"link": "/radio",
"taxonomy": "site"
},
"root": {
"name": "KQED",
"image": "https://ww2.kqed.org/app/uploads/2020/02/KQED-OG-Image@1x.png",
"imageWidth": 1200,
"imageHeight": 630,
"headData": {
"title": "KQED | News, Radio, Podcasts, TV | Public Media for Northern California",
"description": "KQED provides public radio, television, and independent reporting on issues that matter to the Bay Area. We’re the NPR and PBS member station for Northern California."
},
"type": "terms",
"id": "root",
"slug": "root",
"link": "/root",
"taxonomy": "site"
},
"science": {
"name": "Science",
"grouping": [
"science",
"futureofyou"
],
"description": "KQED Science brings you award-winning science and environment coverage from the Bay Area and beyond.",
"type": "terms",
"id": "science",
"slug": "science",
"link": "/science",
"taxonomy": "site"
},
"stateofhealth": {
"name": "State of Health",
"parent": "science",
"type": "terms",
"id": "stateofhealth",
"slug": "stateofhealth",
"link": "/stateofhealth",
"taxonomy": "site"
},
"support": {
"name": "Support",
"type": "terms",
"id": "support",
"slug": "support",
"link": "/support",
"taxonomy": "site"
},
"thedolist": {
"name": "The Do List",
"parent": "arts",
"type": "terms",
"id": "thedolist",
"slug": "thedolist",
"link": "/thedolist",
"taxonomy": "site"
},
"trulyca": {
"name": "Truly CA",
"grouping": [
"arts",
"pop",
"trulyca"
],
"parent": "arts",
"type": "terms",
"id": "trulyca",
"slug": "trulyca",
"link": "/trulyca",
"taxonomy": "site"
},
"tv": {
"name": "TV",
"type": "terms",
"id": "tv",
"slug": "tv",
"link": "/tv",
"taxonomy": "site"
},
"voterguide": {
"name": "Voter Guide",
"parent": "elections",
"alias": "elections",
"type": "terms",
"id": "voterguide",
"slug": "voterguide",
"link": "/voterguide",
"taxonomy": "site"
},
"guiaelectoral": {
"name": "Guia Electoral",
"parent": "elections",
"alias": "elections",
"type": "terms",
"id": "guiaelectoral",
"slug": "guiaelectoral",
"link": "/guiaelectoral",
"taxonomy": "site"
},
"news_34755": {
"type": "terms",
"id": "news_34755",
"meta": {
"index": "terms_1716263798",
"site": "news",
"id": "34755",
"found": true
},
"relationships": {},
"name": "artificial intelligence",
"slug": "artificial-intelligence",
"taxonomy": "tag",
"description": null,
"featImg": null,
"headData": {
"title": "artificial intelligence | KQED News",
"description": null,
"ogTitle": null,
"ogDescription": null,
"ogImgId": null,
"twTitle": null,
"twDescription": null,
"twImgId": null,
"imageData": {
"ogImageSize": {
"file": "https://cdn.kqed.org/wp-content/uploads/2020/02/KQED-OG-Image@1x.png",
"width": 1200,
"height": 630
},
"twImageSize": {
"file": "https://cdn.kqed.org/wp-content/uploads/2020/02/KQED-OG-Image@1x.png"
},
"twitterCard": "summary_large_image"
}
},
"ttid": 34772,
"isLoading": false,
"link": "/news/tag/artificial-intelligence"
},
"source_news_12083428": {
"type": "terms",
"id": "source_news_12083428",
"meta": {
"override": true
},
"name": "Close All Tabs",
"link": "https://www.kqed.org/podcasts/closealltabs",
"isLoading": false
},
"source_news_12082428": {
"type": "terms",
"id": "source_news_12082428",
"meta": {
"override": true
},
"name": "The Bay",
"link": "https://www.kqed.org/podcasts/thebay",
"isLoading": false
},
"news_31795": {
"type": "terms",
"id": "news_31795",
"meta": {
"index": "terms_1716263798",
"site": "news",
"id": "31795",
"found": true
},
"relationships": {},
"featImg": null,
"name": "California",
"description": null,
"taxonomy": "category",
"headData": {
"twImgId": null,
"twTitle": null,
"ogTitle": null,
"ogImgId": null,
"twDescription": null,
"description": null,
"title": "California Archives | KQED News",
"ogDescription": null
},
"ttid": 31812,
"slug": "california",
"isLoading": false,
"link": "/news/category/california"
},
"news_28250": {
"type": "terms",
"id": "news_28250",
"meta": {
"index": "terms_1716263798",
"site": "news",
"id": "28250",
"found": true
},
"relationships": {},
"featImg": null,
"name": "Local",
"description": null,
"taxonomy": "category",
"headData": {
"twImgId": null,
"twTitle": null,
"ogTitle": null,
"ogImgId": null,
"twDescription": null,
"description": null,
"title": "Local Archives | KQED News",
"ogDescription": null
},
"ttid": 28267,
"slug": "local",
"isLoading": false,
"link": "/news/category/local"
},
"news_8": {
"type": "terms",
"id": "news_8",
"meta": {
"index": "terms_1716263798",
"site": "news",
"id": "8",
"found": true
},
"relationships": {},
"featImg": null,
"name": "News",
"description": null,
"taxonomy": "category",
"headData": {
"twImgId": null,
"twTitle": null,
"ogTitle": null,
"ogImgId": null,
"twDescription": null,
"description": null,
"title": "News Archives | KQED News",
"ogDescription": null
},
"ttid": 8,
"slug": "news",
"isLoading": false,
"link": "/news/category/news"
},
"news_248": {
"type": "terms",
"id": "news_248",
"meta": {
"index": "terms_1716263798",
"site": "news",
"id": "248",
"found": true
},
"relationships": {},
"featImg": null,
"name": "Technology",
"description": null,
"taxonomy": "category",
"headData": {
"twImgId": null,
"twTitle": null,
"ogTitle": null,
"ogImgId": null,
"twDescription": null,
"description": null,
"title": "Technology Archives | KQED News",
"ogDescription": null
},
"ttid": 256,
"slug": "technology",
"isLoading": false,
"link": "/news/category/technology"
},
"news_1386": {
"type": "terms",
"id": "news_1386",
"meta": {
"index": "terms_1716263798",
"site": "news",
"id": "1386",
"found": true
},
"relationships": {},
"featImg": null,
"name": "Bay Area",
"description": null,
"taxonomy": "tag",
"headData": {
"twImgId": null,
"twTitle": null,
"ogTitle": null,
"ogImgId": null,
"twDescription": null,
"description": null,
"title": "Bay Area Archives | KQED News",
"ogDescription": null
},
"ttid": 1398,
"slug": "bay-area",
"isLoading": false,
"link": "/news/tag/bay-area"
},
"news_32668": {
"type": "terms",
"id": "news_32668",
"meta": {
"index": "terms_1716263798",
"site": "news",
"id": "32668",
"found": true
},
"relationships": {},
"featImg": null,
"name": "ChatGPT",
"description": null,
"taxonomy": "tag",
"headData": {
"twImgId": null,
"twTitle": null,
"ogTitle": null,
"ogImgId": null,
"twDescription": null,
"description": null,
"title": "ChatGPT Archives | KQED News",
"ogDescription": null
},
"ttid": 32685,
"slug": "chatgpt",
"isLoading": false,
"link": "/news/tag/chatgpt"
},
"news_3897": {
"type": "terms",
"id": "news_3897",
"meta": {
"index": "terms_1716263798",
"site": "news",
"id": "3897",
"found": true
},
"relationships": {},
"featImg": null,
"name": "Elon Musk",
"description": null,
"taxonomy": "tag",
"headData": {
"twImgId": null,
"twTitle": null,
"ogTitle": null,
"ogImgId": null,
"twDescription": null,
"description": null,
"title": "Elon Musk Archives | KQED News",
"ogDescription": null
},
"ttid": 3916,
"slug": "elon-musk",
"isLoading": false,
"link": "/news/tag/elon-musk"
},
"news_21891": {
"type": "terms",
"id": "news_21891",
"meta": {
"index": "terms_1716263798",
"site": "news",
"id": "21891",
"found": true
},
"relationships": {},
"featImg": null,
"name": "lawsuits",
"description": null,
"taxonomy": "tag",
"headData": {
"twImgId": null,
"twTitle": null,
"ogTitle": null,
"ogImgId": null,
"twDescription": null,
"description": null,
"title": "lawsuits Archives | KQED News",
"ogDescription": null
},
"ttid": 21908,
"slug": "lawsuits",
"isLoading": false,
"link": "/news/tag/lawsuits"
},
"news_34054": {
"type": "terms",
"id": "news_34054",
"meta": {
"index": "terms_1716263798",
"site": "news",
"id": "34054",
"found": true
},
"relationships": {},
"featImg": null,
"name": "oakland",
"description": null,
"taxonomy": "tag",
"headData": {
"twImgId": null,
"twTitle": null,
"ogTitle": null,
"ogImgId": null,
"twDescription": null,
"description": null,
"title": "oakland Archives | KQED News",
"ogDescription": null
},
"ttid": 34071,
"slug": "oakland",
"isLoading": false,
"link": "/news/tag/oakland"
},
"news_33542": {
"type": "terms",
"id": "news_33542",
"meta": {
"index": "terms_1716263798",
"site": "news",
"id": "33542",
"found": true
},
"relationships": {},
"featImg": null,
"name": "OpenAI",
"description": null,
"taxonomy": "tag",
"headData": {
"twImgId": null,
"twTitle": null,
"ogTitle": null,
"ogImgId": null,
"twDescription": null,
"description": null,
"title": "OpenAI Archives | KQED News",
"ogDescription": null
},
"ttid": 33559,
"slug": "openai",
"isLoading": false,
"link": "/news/tag/openai"
},
"news_33543": {
"type": "terms",
"id": "news_33543",
"meta": {
"index": "terms_1716263798",
"site": "news",
"id": "33543",
"found": true
},
"relationships": {},
"name": "Sam Altman",
"slug": "sam-altman",
"taxonomy": "tag",
"description": null,
"featImg": null,
"headData": {
"title": "Sam Altman | KQED News",
"description": null,
"ogTitle": null,
"ogDescription": null,
"ogImgId": null,
"twTitle": null,
"twDescription": null,
"twImgId": null,
"metaRobotsNoIndex": "noindex"
},
"ttid": 33560,
"isLoading": false,
"link": "/news/tag/sam-altman"
},
"news_34586": {
"type": "terms",
"id": "news_34586",
"meta": {
"index": "terms_1716263798",
"site": "news",
"id": "34586",
"found": true
},
"relationships": {},
"name": "Silicon Valley",
"slug": "silicon-valley",
"taxonomy": "tag",
"description": null,
"featImg": null,
"headData": {
"title": "Silicon Valley | KQED News",
"description": null,
"ogTitle": null,
"ogDescription": null,
"ogImgId": null,
"twTitle": null,
"twDescription": null,
"twImgId": null
},
"ttid": 34603,
"isLoading": false,
"link": "/news/tag/silicon-valley"
},
"news_1631": {
"type": "terms",
"id": "news_1631",
"meta": {
"index": "terms_1716263798",
"site": "news",
"id": "1631",
"found": true
},
"relationships": {},
"name": "Technology",
"slug": "technology",
"taxonomy": "tag",
"description": null,
"featImg": null,
"headData": {
"title": "Technology | KQED News",
"description": null,
"ogTitle": null,
"ogDescription": null,
"ogImgId": null,
"twTitle": null,
"twDescription": null,
"twImgId": null
},
"ttid": 1643,
"isLoading": false,
"link": "/news/tag/technology"
},
"news_33733": {
"type": "terms",
"id": "news_33733",
"meta": {
"index": "terms_1716263798",
"site": "news",
"id": "33733",
"found": true
},
"relationships": {},
"featImg": null,
"name": "News",
"description": null,
"taxonomy": "interest",
"headData": {
"twImgId": null,
"twTitle": null,
"ogTitle": null,
"ogImgId": null,
"twDescription": null,
"description": null,
"title": "News Archives | KQED News",
"ogDescription": null
},
"ttid": 33750,
"slug": "news",
"isLoading": false,
"link": "/news/interest/news"
},
"news_33730": {
"type": "terms",
"id": "news_33730",
"meta": {
"index": "terms_1716263798",
"site": "news",
"id": "33730",
"found": true
},
"relationships": {},
"featImg": null,
"name": "Oakland",
"description": null,
"taxonomy": "interest",
"headData": {
"twImgId": null,
"twTitle": null,
"ogTitle": null,
"ogImgId": null,
"twDescription": null,
"description": null,
"title": "Oakland Archives | KQED News",
"ogDescription": null
},
"ttid": 33747,
"slug": "oakland",
"isLoading": false,
"link": "/news/interest/oakland"
},
"news_33732": {
"type": "terms",
"id": "news_33732",
"meta": {
"index": "terms_1716263798",
"site": "news",
"id": "33732",
"found": true
},
"relationships": {},
"featImg": null,
"name": "Technology",
"description": null,
"taxonomy": "interest",
"headData": {
"twImgId": null,
"twTitle": null,
"ogTitle": null,
"ogImgId": null,
"twDescription": null,
"description": null,
"title": "Technology Archives | KQED News",
"ogDescription": null
},
"ttid": 33749,
"slug": "technology",
"isLoading": false,
"link": "/news/interest/technology"
},
"news_35082": {
"type": "terms",
"id": "news_35082",
"meta": {
"index": "terms_1716263798",
"site": "news",
"id": "35082",
"found": true
},
"relationships": {},
"name": "Close All Tabs",
"slug": "close-all-tabs",
"taxonomy": "program",
"description": null,
"featImg": null,
"headData": {
"title": "Close All Tabs | KQED News",
"description": null,
"ogTitle": null,
"ogDescription": null,
"ogImgId": null,
"twTitle": null,
"twDescription": null,
"twImgId": null
},
"ttid": 35099,
"isLoading": false,
"link": "/news/program/close-all-tabs"
},
"news_33520": {
"type": "terms",
"id": "news_33520",
"meta": {
"index": "terms_1716263798",
"site": "news",
"id": "33520",
"found": true
},
"relationships": {},
"featImg": null,
"name": "Podcast",
"description": null,
"taxonomy": "category",
"headData": {
"twImgId": null,
"twTitle": null,
"ogTitle": null,
"ogImgId": null,
"twDescription": null,
"description": null,
"title": "Podcast Archives | KQED News",
"ogDescription": null
},
"ttid": 33537,
"slug": "podcast",
"isLoading": false,
"link": "/news/category/podcast"
},
"news_25184": {
"type": "terms",
"id": "news_25184",
"meta": {
"index": "terms_1716263798",
"site": "news",
"id": "25184",
"found": true
},
"relationships": {},
"featImg": null,
"name": "AI",
"description": null,
"taxonomy": "tag",
"headData": {
"twImgId": null,
"twTitle": null,
"ogTitle": null,
"ogImgId": null,
"twDescription": null,
"description": null,
"title": "AI Archives | KQED News",
"ogDescription": null
},
"ttid": 25201,
"slug": "ai",
"isLoading": false,
"link": "/news/tag/ai"
},
"news_22973": {
"type": "terms",
"id": "news_22973",
"meta": {
"index": "terms_1716263798",
"site": "news",
"id": "22973",
"found": true
},
"relationships": {},
"featImg": null,
"name": "culture",
"description": null,
"taxonomy": "tag",
"headData": {
"twImgId": null,
"twTitle": null,
"ogTitle": null,
"ogImgId": null,
"twDescription": null,
"description": null,
"title": "culture Archives | KQED News",
"ogDescription": null
},
"ttid": 22990,
"slug": "culture",
"isLoading": false,
"link": "/news/tag/culture"
},
"news_3137": {
"type": "terms",
"id": "news_3137",
"meta": {
"index": "terms_1716263798",
"site": "news",
"id": "3137",
"found": true
},
"relationships": {},
"featImg": null,
"name": "internet",
"description": null,
"taxonomy": "tag",
"headData": {
"twImgId": null,
"twTitle": null,
"ogTitle": null,
"ogImgId": null,
"twDescription": null,
"description": null,
"title": "internet Archives | KQED News",
"ogDescription": null
},
"ttid": 3155,
"slug": "internet",
"isLoading": false,
"link": "/news/tag/internet"
},
"news_34646": {
"type": "terms",
"id": "news_34646",
"meta": {
"index": "terms_1716263798",
"site": "news",
"id": "34646",
"found": true
},
"relationships": {},
"name": "internet culture",
"slug": "internet-culture",
"taxonomy": "tag",
"description": null,
"featImg": null,
"headData": {
"title": "internet culture | KQED News",
"description": null,
"ogTitle": null,
"ogDescription": null,
"ogImgId": null,
"twTitle": null,
"twDescription": null,
"twImgId": null
},
"ttid": 34663,
"isLoading": false,
"link": "/news/tag/internet-culture"
},
"news_2414": {
"type": "terms",
"id": "news_2414",
"meta": {
"index": "terms_1716263798",
"site": "news",
"id": "2414",
"found": true
},
"relationships": {},
"featImg": null,
"name": "Internet Privacy",
"description": null,
"taxonomy": "tag",
"headData": {
"twImgId": null,
"twTitle": null,
"ogTitle": null,
"ogImgId": null,
"twDescription": null,
"description": null,
"title": "Internet Privacy Archives | KQED News",
"ogDescription": null
},
"ttid": 2429,
"slug": "internet-privacy",
"isLoading": false,
"link": "/news/tag/internet-privacy"
},
"news_1859": {
"type": "terms",
"id": "news_1859",
"meta": {
"index": "terms_1716263798",
"site": "news",
"id": "1859",
"found": true
},
"relationships": {},
"featImg": null,
"name": "privacy",
"description": null,
"taxonomy": "tag",
"headData": {
"twImgId": null,
"twTitle": null,
"ogTitle": null,
"ogImgId": null,
"twDescription": null,
"description": null,
"title": "privacy Archives | KQED News",
"ogDescription": null
},
"ttid": 1874,
"slug": "privacy",
"isLoading": false,
"link": "/news/tag/privacy"
},
"news_4837": {
"type": "terms",
"id": "news_4837",
"meta": {
"index": "terms_1716263798",
"site": "news",
"id": "4837",
"found": true
},
"relationships": {},
"featImg": null,
"name": "revenge porn",
"description": null,
"taxonomy": "tag",
"headData": {
"twImgId": null,
"twTitle": null,
"ogTitle": null,
"ogImgId": null,
"twDescription": null,
"description": null,
"title": "revenge porn Archives | KQED News",
"ogDescription": null
},
"ttid": 4856,
"slug": "revenge-porn",
"isLoading": false,
"link": "/news/tag/revenge-porn"
},
"news_6188": {
"type": "terms",
"id": "news_6188",
"meta": {
"index": "terms_1716263798",
"site": "news",
"id": "6188",
"found": true
},
"relationships": {},
"featImg": null,
"name": "Law and Justice",
"description": null,
"taxonomy": "category",
"headData": {
"twImgId": null,
"twTitle": null,
"ogTitle": null,
"ogImgId": null,
"twDescription": null,
"description": null,
"title": "Law and Justice Archives | KQED News",
"ogDescription": null
},
"ttid": 6212,
"slug": "law-and-justice",
"isLoading": false,
"link": "/news/category/law-and-justice"
},
"news_27626": {
"type": "terms",
"id": "news_27626",
"meta": {
"index": "terms_1716263798",
"site": "news",
"id": "27626",
"found": true
},
"relationships": {},
"featImg": null,
"name": "featured-news",
"description": null,
"taxonomy": "tag",
"headData": {
"twImgId": null,
"twTitle": null,
"ogTitle": null,
"ogImgId": null,
"twDescription": null,
"description": null,
"title": "featured-news Archives | KQED News",
"ogDescription": null
},
"ttid": 27643,
"slug": "featured-news",
"isLoading": false,
"link": "/news/tag/featured-news"
},
"news_19954": {
"type": "terms",
"id": "news_19954",
"meta": {
"index": "terms_1716263798",
"site": "news",
"id": "19954",
"found": true
},
"relationships": {},
"featImg": null,
"name": "Law and Justice",
"description": null,
"taxonomy": "tag",
"headData": {
"twImgId": null,
"twTitle": null,
"ogTitle": null,
"ogImgId": null,
"twDescription": null,
"description": null,
"title": "Law and Justice Archives | KQED News",
"ogDescription": null
},
"ttid": 19971,
"slug": "law-and-justice",
"isLoading": false,
"link": "/news/tag/law-and-justice"
},
"news_36810": {
"type": "terms",
"id": "news_36810",
"meta": {
"index": "terms_1716263798",
"site": "news",
"id": "36810",
"found": true
},
"relationships": {},
"name": "federal trial",
"slug": "federal-trial",
"taxonomy": "tag",
"description": null,
"featImg": null,
"headData": {
"title": "federal trial | KQED News",
"description": null,
"ogTitle": null,
"ogDescription": null,
"ogImgId": null,
"twTitle": null,
"twDescription": null,
"twImgId": null
},
"ttid": 36827,
"isLoading": false,
"link": "/news/tag/federal-trial"
},
"news_33812": {
"type": "terms",
"id": "news_33812",
"meta": {
"index": "terms_1716263798",
"site": "news",
"id": "33812",
"found": true
},
"relationships": {},
"featImg": null,
"name": "Interests",
"description": null,
"taxonomy": "tag",
"headData": {
"twImgId": null,
"twTitle": null,
"ogTitle": null,
"ogImgId": null,
"twDescription": null,
"description": null,
"title": "Interests Archives | KQED News",
"ogDescription": null
},
"ttid": 33829,
"slug": "interests",
"isLoading": false,
"link": "/news/tag/interests"
},
"news_35758": {
"type": "terms",
"id": "news_35758",
"meta": {
"index": "terms_1716263798",
"site": "news",
"id": "35758",
"found": true
},
"relationships": {},
"name": "Open AI",
"slug": "open-ai",
"taxonomy": "tag",
"description": null,
"featImg": null,
"headData": {
"title": "Open AI | KQED News",
"description": null,
"ogTitle": null,
"ogDescription": null,
"ogImgId": null,
"twTitle": null,
"twDescription": null,
"twImgId": null
},
"ttid": 35775,
"isLoading": false,
"link": "/news/tag/open-ai"
},
"news_22598": {
"type": "terms",
"id": "news_22598",
"meta": {
"index": "terms_1716263798",
"site": "news",
"id": "22598",
"found": true
},
"relationships": {},
"featImg": null,
"name": "The Bay",
"description": "\u003cimg class=\"alignnone size-medium wp-image-11638190\" src=\"https://ww2.kqed.org/news/wp-content/uploads/sites/10/2018/02/TheBay_1200x6301.png\" alt=\"\" />\r\n\u003cbr/>\r\n\r\nEvery good story starts local. So that’s where we start. \u003ci>The Bay\u003c/i> is storytelling for daily news. KQED host Devin Katayama talks with reporters to help us make sense of what’s happening in the Bay Area. One story. One conversation. One idea.\r\n\r\n\u003cstrong>Subscribe to The Bay:\u003c/strong>\r\n\r\n\u003ca href=\"https://itunes.apple.com/us/podcast/the-bay/id1350043452?mt=2\">\u003cimg src=\"https://ww2.kqed.org/news/wp-content/uploads/sites/10/2018/01/Listen_on_Apple_Podcasts_sRGB_US-e1515635079510.png\" />\u003c/a>",
"taxonomy": "tag",
"headData": {
"twImgId": null,
"twTitle": null,
"ogTitle": null,
"ogImgId": null,
"twDescription": null,
"description": "Every good story starts local. So that’s where we start. The Bay is storytelling for daily news. KQED host Devin Katayama talks with reporters to help us make sense of what’s happening in the Bay Area. One story. One conversation. One idea. Subscribe to The Bay:",
"title": "The Bay Archives | KQED News",
"ogDescription": null
},
"ttid": 22615,
"slug": "the-bay",
"isLoading": false,
"link": "/news/tag/the-bay"
},
"news_34167": {
"type": "terms",
"id": "news_34167",
"meta": {
"index": "terms_1716263798",
"site": "news",
"id": "34167",
"found": true
},
"relationships": {},
"name": "Criminal Justice",
"slug": "criminal-justice",
"taxonomy": "category",
"description": null,
"featImg": null,
"headData": {
"title": "Criminal Justice Archives | KQED News",
"description": null,
"ogTitle": null,
"ogDescription": null,
"ogImgId": null,
"twTitle": null,
"twDescription": null,
"twImgId": null
},
"ttid": 34184,
"isLoading": false,
"link": "/news/category/criminal-justice"
},
"news_17725": {
"type": "terms",
"id": "news_17725",
"meta": {
"index": "terms_1716263798",
"site": "news",
"id": "17725",
"found": true
},
"relationships": {},
"featImg": null,
"name": "criminal justice",
"description": null,
"taxonomy": "tag",
"headData": {
"twImgId": null,
"twTitle": null,
"ogTitle": null,
"ogImgId": null,
"twDescription": null,
"description": null,
"title": "criminal justice Archives | KQED News",
"ogDescription": null
},
"ttid": 17759,
"slug": "criminal-justice",
"isLoading": false,
"link": "/news/tag/criminal-justice"
},
"news_22434": {
"type": "terms",
"id": "news_22434",
"meta": {
"index": "terms_1716263798",
"site": "news",
"id": "22434",
"found": true
},
"relationships": {},
"featImg": null,
"name": "death",
"description": null,
"taxonomy": "tag",
"headData": {
"twImgId": null,
"twTitle": null,
"ogTitle": null,
"ogImgId": null,
"twDescription": null,
"description": null,
"title": "death Archives | KQED News",
"ogDescription": null
},
"ttid": 22451,
"slug": "death",
"isLoading": false,
"link": "/news/tag/death"
},
"news_35784": {
"type": "terms",
"id": "news_35784",
"meta": {
"index": "terms_1716263798",
"site": "news",
"id": "35784",
"found": true
},
"relationships": {},
"name": "gun violence",
"slug": "gun-violence",
"taxonomy": "tag",
"description": null,
"featImg": null,
"headData": {
"title": "gun violence | KQED News",
"description": null,
"ogTitle": null,
"ogDescription": null,
"ogImgId": null,
"twTitle": null,
"twDescription": null,
"twImgId": null
},
"ttid": 35801,
"isLoading": false,
"link": "/news/tag/gun-violence"
},
"news_38": {
"type": "terms",
"id": "news_38",
"meta": {
"index": "terms_1716263798",
"site": "news",
"id": "38",
"found": true
},
"relationships": {},
"featImg": null,
"name": "San Francisco",
"description": null,
"taxonomy": "tag",
"headData": {
"twImgId": null,
"twTitle": null,
"ogTitle": null,
"ogImgId": null,
"twDescription": null,
"description": null,
"title": "San Francisco Archives | KQED News",
"ogDescription": null
},
"ttid": 58,
"slug": "san-francisco",
"isLoading": false,
"link": "/news/tag/san-francisco"
},
"news_33745": {
"type": "terms",
"id": "news_33745",
"meta": {
"index": "terms_1716263798",
"site": "news",
"id": "33745",
"found": true
},
"relationships": {},
"featImg": null,
"name": "Criminal Justice",
"description": null,
"taxonomy": "interest",
"headData": {
"twImgId": null,
"twTitle": null,
"ogTitle": null,
"ogImgId": null,
"twDescription": null,
"description": null,
"title": "Criminal Justice Archives | KQED News",
"ogDescription": null
},
"ttid": 33762,
"slug": "criminal-justice",
"isLoading": false,
"link": "/news/interest/criminal-justice"
},
"news_33729": {
"type": "terms",
"id": "news_33729",
"meta": {
"index": "terms_1716263798",
"site": "news",
"id": "33729",
"found": true
},
"relationships": {},
"featImg": null,
"name": "San Francisco",
"description": null,
"taxonomy": "interest",
"headData": {
"twImgId": null,
"twTitle": null,
"ogTitle": null,
"ogImgId": null,
"twDescription": null,
"description": null,
"title": "San Francisco Archives | KQED News",
"ogDescription": null
},
"ttid": 33746,
"slug": "san-francisco",
"isLoading": false,
"link": "/news/interest/san-francisco"
},
"news_57": {
"type": "terms",
"id": "news_57",
"meta": {
"index": "terms_1716263798",
"site": "news",
"id": "57",
"found": true
},
"relationships": {},
"featImg": null,
"name": "Tesla",
"description": null,
"taxonomy": "tag",
"headData": {
"twImgId": null,
"twTitle": null,
"ogTitle": null,
"ogImgId": null,
"twDescription": null,
"description": null,
"title": "Tesla Archives | KQED News",
"ogDescription": null
},
"ttid": 57,
"slug": "tesla",
"isLoading": false,
"link": "/news/tag/tesla"
},
"news_18352": {
"type": "terms",
"id": "news_18352",
"meta": {
"index": "terms_1716263798",
"site": "news",
"id": "18352",
"found": true
},
"relationships": {},
"featImg": null,
"name": "East Bay",
"description": null,
"taxonomy": "tag",
"headData": {
"twImgId": null,
"twTitle": null,
"ogTitle": null,
"ogImgId": null,
"twDescription": null,
"description": null,
"title": "East Bay Archives | KQED News",
"ogDescription": null
},
"ttid": 18386,
"slug": "east-bay",
"isLoading": false,
"link": "/news/tag/east-bay"
},
"news_18538": {
"type": "terms",
"id": "news_18538",
"meta": {
"index": "terms_1716263798",
"site": "news",
"id": "18538",
"found": true
},
"relationships": {},
"featImg": null,
"name": "California",
"description": null,
"taxonomy": "tag",
"headData": {
"twImgId": null,
"twTitle": null,
"ogTitle": null,
"ogImgId": null,
"twDescription": null,
"description": null,
"title": "California Archives | KQED News",
"ogDescription": null
},
"ttid": 31,
"slug": "california",
"isLoading": false,
"link": "/news/tag/california"
},
"news_23052": {
"type": "terms",
"id": "news_23052",
"meta": {
"index": "terms_1716263798",
"site": "news",
"id": "23052",
"found": true
},
"relationships": {},
"featImg": null,
"name": "fraud",
"description": null,
"taxonomy": "tag",
"headData": {
"twImgId": null,
"twTitle": null,
"ogTitle": null,
"ogImgId": null,
"twDescription": null,
"description": null,
"title": "fraud Archives | KQED News",
"ogDescription": null
},
"ttid": 23069,
"slug": "fraud",
"isLoading": false,
"link": "/news/tag/fraud"
},
"news_33738": {
"type": "terms",
"id": "news_33738",
"meta": {
"index": "terms_1716263798",
"site": "news",
"id": "33738",
"found": true
},
"relationships": {},
"featImg": null,
"name": "California",
"description": null,
"taxonomy": "interest",
"headData": {
"twImgId": null,
"twTitle": null,
"ogTitle": null,
"ogImgId": null,
"twDescription": null,
"description": null,
"title": "California Archives | KQED News",
"ogDescription": null
},
"ttid": 33755,
"slug": "california",
"isLoading": false,
"link": "/news/interest/california"
}
},
"userAgentReducer": {
"userAgent": "Mozilla/5.0 AppleWebKit/537.36 (KHTML, like Gecko; compatible; ClaudeBot/1.0; +claudebot@anthropic.com)",
"isBot": true
},
"userPermissionsReducer": {
"wpLoggedIn": false
},
"localStorageReducer": {},
"browserHistoryReducer": [],
"eventsReducer": {},
"fssReducer": {},
"tvDailyScheduleReducer": {},
"tvWeeklyScheduleReducer": {},
"tvPrimetimeScheduleReducer": {},
"tvMonthlyScheduleReducer": {},
"userAccountReducer": {
"user": {
"email": null,
"emailStatus": "EMAIL_UNVALIDATED",
"loggedStatus": "LOGGED_OUT",
"loggingChecked": false,
"articles": [],
"firstName": null,
"lastName": null,
"phoneNumber": null,
"fetchingMembership": false,
"membershipError": false,
"memberships": [
{
"id": null,
"startDate": null,
"firstName": null,
"lastName": null,
"familyNumber": null,
"memberNumber": null,
"memberSince": null,
"expirationDate": null,
"pfsEligible": false,
"isSustaining": false,
"membershipLevel": "Prospect",
"membershipStatus": "Non Member",
"lastGiftDate": null,
"renewalDate": null,
"lastDonationAmount": null
}
]
},
"authModal": {
"isOpen": false,
"view": "LANDING_VIEW"
},
"error": null
},
"youthMediaReducer": {},
"checkPleaseReducer": {
"filterData": {
"region": {
"key": "Restaurant Region",
"filters": [
"Any Region"
]
},
"cuisine": {
"key": "Restaurant Cuisine",
"filters": [
"Any Cuisine"
]
}
},
"restaurantDataById": {},
"restaurantIdsSorted": [],
"error": null
},
"location": {
"pathname": "/news/tag/artificial-intelligence",
"previousPathname": "/"
}
}