// Flag read by the client bundle to detect that this page was server-side rendered.
window.__IS_SSR__ = true;
window.__INITIAL_STATE__={
"attachmentsReducer": {
"audio_0": {
"type": "attachments",
"id": "audio_0",
"imgSizes": {
"kqedFullSize": {
"file": "https://ww2.kqed.org/news/wp-content/themes/KQED-unified/img/audio_bgs/background0.jpg"
}
}
},
"audio_1": {
"type": "attachments",
"id": "audio_1",
"imgSizes": {
"kqedFullSize": {
"file": "https://ww2.kqed.org/news/wp-content/themes/KQED-unified/img/audio_bgs/background1.jpg"
}
}
},
"audio_2": {
"type": "attachments",
"id": "audio_2",
"imgSizes": {
"kqedFullSize": {
"file": "https://ww2.kqed.org/news/wp-content/themes/KQED-unified/img/audio_bgs/background2.jpg"
}
}
},
"audio_3": {
"type": "attachments",
"id": "audio_3",
"imgSizes": {
"kqedFullSize": {
"file": "https://ww2.kqed.org/news/wp-content/themes/KQED-unified/img/audio_bgs/background3.jpg"
}
}
},
"audio_4": {
"type": "attachments",
"id": "audio_4",
"imgSizes": {
"kqedFullSize": {
"file": "https://ww2.kqed.org/news/wp-content/themes/KQED-unified/img/audio_bgs/background4.jpg"
}
}
},
"placeholder": {
"type": "attachments",
"id": "placeholder",
"imgSizes": {
"thumbnail": {
"file": "https://cdn.kqed.org/wp-content/uploads/2024/12/KQED-Default-Image-816638274-2000x1333-1-160x107.jpg",
"width": 160,
"height": 107,
"mimeType": "image/jpeg"
},
"medium": {
"file": "https://cdn.kqed.org/wp-content/uploads/2024/12/KQED-Default-Image-816638274-2000x1333-1-800x533.jpg",
"width": 800,
"height": 533,
"mimeType": "image/jpeg"
},
"medium_large": {
"file": "https://cdn.kqed.org/wp-content/uploads/2024/12/KQED-Default-Image-816638274-2000x1333-1-768x512.jpg",
"width": 768,
"height": 512,
"mimeType": "image/jpeg"
},
"large": {
"file": "https://cdn.kqed.org/wp-content/uploads/2024/12/KQED-Default-Image-816638274-2000x1333-1-1020x680.jpg",
"width": 1020,
"height": 680,
"mimeType": "image/jpeg"
},
"1536x1536": {
"file": "https://cdn.kqed.org/wp-content/uploads/2024/12/KQED-Default-Image-816638274-2000x1333-1-1536x1024.jpg",
"width": 1536,
"height": 1024,
"mimeType": "image/jpeg"
},
"fd-lrg": {
"file": "https://cdn.kqed.org/wp-content/uploads/2024/12/KQED-Default-Image-816638274-2000x1333-1-1536x1024.jpg",
"width": 1536,
"height": 1024,
"mimeType": "image/jpeg"
},
"fd-med": {
"file": "https://cdn.kqed.org/wp-content/uploads/2024/12/KQED-Default-Image-816638274-2000x1333-1-1020x680.jpg",
"width": 1020,
"height": 680,
"mimeType": "image/jpeg"
},
"fd-sm": {
"file": "https://cdn.kqed.org/wp-content/uploads/2024/12/KQED-Default-Image-816638274-2000x1333-1-800x533.jpg",
"width": 800,
"height": 533,
"mimeType": "image/jpeg"
},
"post-thumbnail": {
"file": "https://cdn.kqed.org/wp-content/uploads/2024/12/KQED-Default-Image-816638274-2000x1333-1-672x372.jpg",
"width": 672,
"height": 372,
"mimeType": "image/jpeg"
},
"twentyfourteen-full-width": {
"file": "https://cdn.kqed.org/wp-content/uploads/2024/12/KQED-Default-Image-816638274-2000x1333-1-1038x576.jpg",
"width": 1038,
"height": 576,
"mimeType": "image/jpeg"
},
"xxsmall": {
"file": "https://cdn.kqed.org/wp-content/uploads/2024/12/KQED-Default-Image-816638274-2000x1333-1-160x107.jpg",
"width": 160,
"height": 107,
"mimeType": "image/jpeg"
},
"xsmall": {
"file": "https://cdn.kqed.org/wp-content/uploads/2024/12/KQED-Default-Image-816638274-2000x1333-1-672x372.jpg",
"width": 672,
"height": 372,
"mimeType": "image/jpeg"
},
"small": {
"file": "https://cdn.kqed.org/wp-content/uploads/2024/12/KQED-Default-Image-816638274-2000x1333-1-672x372.jpg",
"width": 672,
"height": 372,
"mimeType": "image/jpeg"
},
"xlarge": {
"file": "https://cdn.kqed.org/wp-content/uploads/2024/12/KQED-Default-Image-816638274-2000x1333-1-1020x680.jpg",
"width": 1020,
"height": 680,
"mimeType": "image/jpeg"
},
"full-width": {
"file": "https://cdn.kqed.org/wp-content/uploads/2024/12/KQED-Default-Image-816638274-2000x1333-1-1920x1280.jpg",
"width": 1920,
"height": 1280,
"mimeType": "image/jpeg"
},
"guest-author-32": {
"file": "https://cdn.kqed.org/wp-content/uploads/2025/01/KQED-Default-Image-816638274-1333x1333-1-160x160.jpg",
"width": 32,
"height": 32,
"mimeType": "image/jpeg"
},
"guest-author-50": {
"file": "https://cdn.kqed.org/wp-content/uploads/2025/01/KQED-Default-Image-816638274-1333x1333-1-160x160.jpg",
"width": 50,
"height": 50,
"mimeType": "image/jpeg"
},
"guest-author-64": {
"file": "https://cdn.kqed.org/wp-content/uploads/2025/01/KQED-Default-Image-816638274-1333x1333-1-160x160.jpg",
"width": 64,
"height": 64,
"mimeType": "image/jpeg"
},
"guest-author-96": {
"file": "https://cdn.kqed.org/wp-content/uploads/2025/01/KQED-Default-Image-816638274-1333x1333-1-160x160.jpg",
"width": 96,
"height": 96,
"mimeType": "image/jpeg"
},
"guest-author-128": {
"file": "https://cdn.kqed.org/wp-content/uploads/2025/01/KQED-Default-Image-816638274-1333x1333-1-160x160.jpg",
"width": 128,
"height": 128,
"mimeType": "image/jpeg"
},
"detail": {
"file": "https://cdn.kqed.org/wp-content/uploads/2025/01/KQED-Default-Image-816638274-1333x1333-1-160x160.jpg",
"width": 160,
"height": 160,
"mimeType": "image/jpeg"
},
"kqedFullSize": {
"file": "https://cdn.kqed.org/wp-content/uploads/2024/12/KQED-Default-Image-816638274-2000x1333-1.jpg",
"width": 2000,
"height": 1333
}
}
},
"news_12063465": {
"type": "attachments",
"id": "news_12063465",
"meta": {
"index": "attachments_1716263798",
"site": "news",
"id": "12063465",
"found": true
},
"title": "Low Angle View Of Woman Using Mobile Phone While Sitting On Bed In Darkroom",
"publishDate": 1762550603,
"status": "inherit",
"parent": 12063401,
"modified": 1762550628,
"caption": "Individuals and families in the U.S. and Canada are suing OpenAI in California, alleging that they or their loved ones have been harmed by their interactions with ChatGPT.",
"credit": "EyeEm Mobile GmbH/Getty Images",
"altTag": null,
"description": null,
"imgSizes": {
"thumbnail": {
"file": "https://cdn.kqed.org/wp-content/uploads/sites/10/2025/11/OpenAiLawsuitsGetty-160x120.jpg",
"width": 160,
"height": 120,
"mimeType": "image/jpeg"
},
"1536x1536": {
"file": "https://cdn.kqed.org/wp-content/uploads/sites/10/2025/11/OpenAiLawsuitsGetty-1536x1152.jpg",
"width": 1536,
"height": 1152,
"mimeType": "image/jpeg"
},
"post-thumbnail": {
"file": "https://cdn.kqed.org/wp-content/uploads/sites/10/2025/11/OpenAiLawsuitsGetty-672x372.jpg",
"width": 672,
"height": 372,
"mimeType": "image/jpeg"
},
"twentyfourteen-full-width": {
"file": "https://cdn.kqed.org/wp-content/uploads/sites/10/2025/11/OpenAiLawsuitsGetty-1038x576.jpg",
"width": 1038,
"height": 576,
"mimeType": "image/jpeg"
},
"npr-cds-wide": {
"file": "https://cdn.kqed.org/wp-content/uploads/sites/10/2025/11/OpenAiLawsuitsGetty-1200x675.jpg",
"width": 1200,
"height": 675,
"mimeType": "image/jpeg"
},
"kqedFullSize": {
"file": "https://cdn.kqed.org/wp-content/uploads/sites/10/2025/11/OpenAiLawsuitsGetty.jpg",
"width": 2000,
"height": 1500
}
},
"fetchFailed": false,
"isLoading": false
},
"news_12060375": {
"type": "attachments",
"id": "news_12060375",
"meta": {
"index": "attachments_1716263798",
"site": "news",
"id": "12060375",
"found": true
},
"title": "US-TECH-AI-ALTMAN",
"publishDate": 1760733500,
"status": "inherit",
"parent": 12060365,
"modified": 1760733569,
"caption": "OpenAI CEO Sam Altman speaks at OpenAI DevDay, the company's annual conference for developers, in San Francisco, California, on Oct. 6, 2025. ",
"credit": "Benjamin Legendre/AFP via Getty Images",
"altTag": null,
"description": null,
"imgSizes": {
"thumbnail": {
"file": "https://cdn.kqed.org/wp-content/uploads/sites/10/2025/10/SamAltmanGetty-160x108.jpg",
"width": 160,
"height": 108,
"mimeType": "image/jpeg"
},
"1536x1536": {
"file": "https://cdn.kqed.org/wp-content/uploads/sites/10/2025/10/SamAltmanGetty-1536x1034.jpg",
"width": 1536,
"height": 1034,
"mimeType": "image/jpeg"
},
"post-thumbnail": {
"file": "https://cdn.kqed.org/wp-content/uploads/sites/10/2025/10/SamAltmanGetty-672x372.jpg",
"width": 672,
"height": 372,
"mimeType": "image/jpeg"
},
"twentyfourteen-full-width": {
"file": "https://cdn.kqed.org/wp-content/uploads/sites/10/2025/10/SamAltmanGetty-1038x576.jpg",
"width": 1038,
"height": 576,
"mimeType": "image/jpeg"
},
"kqedFullSize": {
"file": "https://cdn.kqed.org/wp-content/uploads/sites/10/2025/10/SamAltmanGetty.jpg",
"width": 2000,
"height": 1347
}
},
"fetchFailed": false,
"isLoading": false
},
"news_11998856": {
"type": "attachments",
"id": "news_11998856",
"meta": {
"index": "attachments_1716263798",
"site": "news",
"id": "11998856",
"found": true
},
"title": "In this photo illustration, the ChatGPT logo is displayed on",
"publishDate": 1722890738,
"status": "inherit",
"parent": 11998817,
"modified": 1722890828,
"caption": "Several bills addressing generative artificial intelligence are moving through the state Legislature in California’s piecemeal approach to regulation.",
"credit": "Jaque Silva/SOPA Images/LightRocket via Getty Images",
"altTag": null,
"description": null,
"imgSizes": {
"medium": {
"file": "https://cdn.kqed.org/wp-content/uploads/sites/10/2024/08/ChatGPTGetty-800x533.jpg",
"width": 800,
"height": 533,
"mimeType": "image/jpeg"
},
"large": {
"file": "https://cdn.kqed.org/wp-content/uploads/sites/10/2024/08/ChatGPTGetty-1020x680.jpg",
"width": 1020,
"height": 680,
"mimeType": "image/jpeg"
},
"thumbnail": {
"file": "https://cdn.kqed.org/wp-content/uploads/sites/10/2024/08/ChatGPTGetty-160x107.jpg",
"width": 160,
"height": 107,
"mimeType": "image/jpeg"
},
"1536x1536": {
"file": "https://cdn.kqed.org/wp-content/uploads/sites/10/2024/08/ChatGPTGetty-1536x1024.jpg",
"width": 1536,
"height": 1024,
"mimeType": "image/jpeg"
},
"post-thumbnail": {
"file": "https://cdn.kqed.org/wp-content/uploads/sites/10/2024/08/ChatGPTGetty-672x372.jpg",
"width": 672,
"height": 372,
"mimeType": "image/jpeg"
},
"twentyfourteen-full-width": {
"file": "https://cdn.kqed.org/wp-content/uploads/sites/10/2024/08/ChatGPTGetty-1038x576.jpg",
"width": 1038,
"height": 576,
"mimeType": "image/jpeg"
},
"full-width": {
"file": "https://cdn.kqed.org/wp-content/uploads/sites/10/2024/08/ChatGPTGetty-1920x1280.jpg",
"width": 1920,
"height": 1280,
"mimeType": "image/jpeg"
},
"kqedFullSize": {
"file": "https://cdn.kqed.org/wp-content/uploads/sites/10/2024/08/ChatGPTGetty.jpg",
"width": 2000,
"height": 1333
}
},
"fetchFailed": false,
"isLoading": false
},
"news_12039151": {
"type": "attachments",
"id": "news_12039151",
"meta": {
"index": "attachments_1716263798",
"site": "news",
"id": "12039151",
"found": true
},
"title": "NKOREAFAMILY",
"publishDate": 1746646130,
"status": "inherit",
"parent": 12038874,
"modified": 1746646165,
"caption": "Just as adults are using generative AI companion chatbots for solace and connection, children are, too. So, how can parents stay in the conversation?",
"credit": "Anna Fifield/The Washington Post via Getty Images",
"altTag": null,
"description": null,
"imgSizes": {
"medium": {
"file": "https://cdn.kqed.org/wp-content/uploads/sites/10/2025/05/AIChatBotsGetty-800x600.jpg",
"width": 800,
"height": 600,
"mimeType": "image/jpeg"
},
"large": {
"file": "https://cdn.kqed.org/wp-content/uploads/sites/10/2025/05/AIChatBotsGetty-1020x765.jpg",
"width": 1020,
"height": 765,
"mimeType": "image/jpeg"
},
"thumbnail": {
"file": "https://cdn.kqed.org/wp-content/uploads/sites/10/2025/05/AIChatBotsGetty-160x120.jpg",
"width": 160,
"height": 120,
"mimeType": "image/jpeg"
},
"1536x1536": {
"file": "https://cdn.kqed.org/wp-content/uploads/sites/10/2025/05/AIChatBotsGetty-1536x1152.jpg",
"width": 1536,
"height": 1152,
"mimeType": "image/jpeg"
},
"post-thumbnail": {
"file": "https://cdn.kqed.org/wp-content/uploads/sites/10/2025/05/AIChatBotsGetty-672x372.jpg",
"width": 672,
"height": 372,
"mimeType": "image/jpeg"
},
"twentyfourteen-full-width": {
"file": "https://cdn.kqed.org/wp-content/uploads/sites/10/2025/05/AIChatBotsGetty-1038x576.jpg",
"width": 1038,
"height": 576,
"mimeType": "image/jpeg"
},
"full-width": {
"file": "https://cdn.kqed.org/wp-content/uploads/sites/10/2025/05/AIChatBotsGetty-1920x1440.jpg",
"width": 1920,
"height": 1440,
"mimeType": "image/jpeg"
},
"kqedFullSize": {
"file": "https://cdn.kqed.org/wp-content/uploads/sites/10/2025/05/AIChatBotsGetty.jpg",
"width": 2000,
"height": 1500
}
},
"fetchFailed": false,
"isLoading": false
},
"news_12038161": {
"type": "attachments",
"id": "news_12038161",
"meta": {
"index": "attachments_1716263798",
"site": "news",
"id": "12038161",
"found": true
},
"title": "CHINA-TECHNOLOGY-AI-ROMANCE",
"publishDate": 1745968881,
"status": "inherit",
"parent": 12038154,
"modified": 1746042249,
"caption": "Unlike digital assistants, companion chatbots are much more likely to veer into socially controversial and even illegal territory. A new report out from Stanford University researchers and Common Sense Media argues that children and teens should not use these chatbots.",
"credit": "Jade Gao/AFP via Getty Images",
"altTag": null,
"description": null,
"imgSizes": {
"medium": {
"file": "https://cdn.kqed.org/wp-content/uploads/sites/10/2025/04/StanfordStudyAIChatbotsKidsGetty-800x533.jpg",
"width": 800,
"height": 533,
"mimeType": "image/jpeg"
},
"large": {
"file": "https://cdn.kqed.org/wp-content/uploads/sites/10/2025/04/StanfordStudyAIChatbotsKidsGetty-1020x680.jpg",
"width": 1020,
"height": 680,
"mimeType": "image/jpeg"
},
"thumbnail": {
"file": "https://cdn.kqed.org/wp-content/uploads/sites/10/2025/04/StanfordStudyAIChatbotsKidsGetty-160x107.jpg",
"width": 160,
"height": 107,
"mimeType": "image/jpeg"
},
"1536x1536": {
"file": "https://cdn.kqed.org/wp-content/uploads/sites/10/2025/04/StanfordStudyAIChatbotsKidsGetty-1536x1024.jpg",
"width": 1536,
"height": 1024,
"mimeType": "image/jpeg"
},
"post-thumbnail": {
"file": "https://cdn.kqed.org/wp-content/uploads/sites/10/2025/04/StanfordStudyAIChatbotsKidsGetty-672x372.jpg",
"width": 672,
"height": 372,
"mimeType": "image/jpeg"
},
"twentyfourteen-full-width": {
"file": "https://cdn.kqed.org/wp-content/uploads/sites/10/2025/04/StanfordStudyAIChatbotsKidsGetty-1038x576.jpg",
"width": 1038,
"height": 576,
"mimeType": "image/jpeg"
},
"full-width": {
"file": "https://cdn.kqed.org/wp-content/uploads/sites/10/2025/04/StanfordStudyAIChatbotsKidsGetty-1920x1280.jpg",
"width": 1920,
"height": 1280,
"mimeType": "image/jpeg"
},
"kqedFullSize": {
"file": "https://cdn.kqed.org/wp-content/uploads/sites/10/2025/04/StanfordStudyAIChatbotsKidsGetty.jpg",
"width": 2000,
"height": 1333
}
},
"fetchFailed": false,
"isLoading": false
},
"news_12030059": {
"type": "attachments",
"id": "news_12030059",
"meta": {
"index": "attachments_1716263798",
"site": "news",
"id": "12030059",
"found": true
},
"title": "US California Governor Wildfires",
"publishDate": 1741285002,
"status": "inherit",
"parent": 12030057,
"modified": 1741306392,
"caption": "Gov. Gavin Newsom holds a fireside chat with Stephen Cheung, the President and Chief Executive Officer of the Los Angeles County Economic Development Corporation (LAEDC) and its subsidiary, the World Trade Center Los Angeles (WTCLA), at the 2025 Economic Forecast and Industry Outlook convening on Wednesday, Feb. 26, 2025, at the East LA College in Los Angeles.",
"credit": "Damian Dovarganes/AP Photo",
"altTag": null,
"description": null,
"imgSizes": {
"medium": {
"file": "https://cdn.kqed.org/wp-content/uploads/sites/10/2025/03/GavinNewsom2025AP-800x533.jpg",
"width": 800,
"height": 533,
"mimeType": "image/jpeg"
},
"large": {
"file": "https://cdn.kqed.org/wp-content/uploads/sites/10/2025/03/GavinNewsom2025AP-1020x680.jpg",
"width": 1020,
"height": 680,
"mimeType": "image/jpeg"
},
"thumbnail": {
"file": "https://cdn.kqed.org/wp-content/uploads/sites/10/2025/03/GavinNewsom2025AP-160x107.jpg",
"width": 160,
"height": 107,
"mimeType": "image/jpeg"
},
"1536x1536": {
"file": "https://cdn.kqed.org/wp-content/uploads/sites/10/2025/03/GavinNewsom2025AP-1536x1024.jpg",
"width": 1536,
"height": 1024,
"mimeType": "image/jpeg"
},
"post-thumbnail": {
"file": "https://cdn.kqed.org/wp-content/uploads/sites/10/2025/03/GavinNewsom2025AP-672x372.jpg",
"width": 672,
"height": 372,
"mimeType": "image/jpeg"
},
"twentyfourteen-full-width": {
"file": "https://cdn.kqed.org/wp-content/uploads/sites/10/2025/03/GavinNewsom2025AP-1038x576.jpg",
"width": 1038,
"height": 576,
"mimeType": "image/jpeg"
},
"full-width": {
"file": "https://cdn.kqed.org/wp-content/uploads/sites/10/2025/03/GavinNewsom2025AP-1920x1280.jpg",
"width": 1920,
"height": 1280,
"mimeType": "image/jpeg"
},
"kqedFullSize": {
"file": "https://cdn.kqed.org/wp-content/uploads/sites/10/2025/03/GavinNewsom2025AP.jpg",
"width": 2000,
"height": 1333
}
},
"fetchFailed": false,
"isLoading": false
},
"news_12037606": {
"type": "attachments",
"id": "news_12037606",
"meta": {
"index": "attachments_1716263798",
"site": "news",
"id": "12037606",
"found": true
},
"title": "Donald Trump And Joe Biden Participate In First Presidential Debate",
"publishDate": 1745540245,
"status": "inherit",
"parent": 12037518,
"modified": 1745542357,
"caption": "Gov. Gavin Newsom speaks to reporters in the spin room following the CNN Presidential Debate at the McCamish Pavilion on the Georgia Institute of Technology campus on June 27, 2024, in Atlanta, Georgia. ",
"credit": "Andrew Harnik/Getty Images",
"altTag": null,
"description": null,
"imgSizes": {
"medium": {
"file": "https://cdn.kqed.org/wp-content/uploads/sites/10/2025/04/GettyImages-2159615518-800x534.jpg",
"width": 800,
"height": 534,
"mimeType": "image/jpeg"
},
"large": {
"file": "https://cdn.kqed.org/wp-content/uploads/sites/10/2025/04/GettyImages-2159615518-1020x680.jpg",
"width": 1020,
"height": 680,
"mimeType": "image/jpeg"
},
"thumbnail": {
"file": "https://cdn.kqed.org/wp-content/uploads/sites/10/2025/04/GettyImages-2159615518-160x107.jpg",
"width": 160,
"height": 107,
"mimeType": "image/jpeg"
},
"post-thumbnail": {
"file": "https://cdn.kqed.org/wp-content/uploads/sites/10/2025/04/GettyImages-2159615518-672x372.jpg",
"width": 672,
"height": 372,
"mimeType": "image/jpeg"
},
"twentyfourteen-full-width": {
"file": "https://cdn.kqed.org/wp-content/uploads/sites/10/2025/04/GettyImages-2159615518-1024x576.jpg",
"width": 1024,
"height": 576,
"mimeType": "image/jpeg"
},
"kqedFullSize": {
"file": "https://cdn.kqed.org/wp-content/uploads/sites/10/2025/04/GettyImages-2159615518.jpg",
"width": 1024,
"height": 683
}
},
"fetchFailed": false,
"isLoading": false
},
"news_11976118": {
"type": "attachments",
"id": "news_11976118",
"meta": {
"index": "attachments_1716263798",
"site": "news",
"id": "11976118",
"found": true
},
"title": "OpenAI CEO Samuel Altman Testifies To Senate Committee On Rules For Artificial Intelligence",
"publishDate": 1708040236,
"status": "inherit",
"parent": 11976097,
"modified": 1761950561,
"caption": "Sam Altman, CEO of OpenAI, testifies before the Senate Judiciary Subcommittee on Privacy, Technology, and the Law on May 16, 2023, in Washington, D.C. The committee held an oversight hearing to examine AI, focusing on rules for artificial intelligence. ",
"credit": "Win McNamee/Getty Images",
"altTag": "A white man in a blue suit and tie gestures as he speaks in a congressional room surrounded by people.",
"description": "A white man in a blue suit and tie gestures as he speaks in a congressional room surrounded by people.",
"imgSizes": {
"medium": {
"file": "https://cdn.kqed.org/wp-content/uploads/sites/10/2024/02/GettyImages-1490690177-800x534.jpg",
"width": 800,
"height": 534,
"mimeType": "image/jpeg"
},
"large": {
"file": "https://cdn.kqed.org/wp-content/uploads/sites/10/2024/02/GettyImages-1490690177-1020x680.jpg",
"width": 1020,
"height": 680,
"mimeType": "image/jpeg"
},
"thumbnail": {
"file": "https://cdn.kqed.org/wp-content/uploads/sites/10/2024/02/GettyImages-1490690177-160x107.jpg",
"width": 160,
"height": 107,
"mimeType": "image/jpeg"
},
"post-thumbnail": {
"file": "https://cdn.kqed.org/wp-content/uploads/sites/10/2024/02/GettyImages-1490690177-672x372.jpg",
"width": 672,
"height": 372,
"mimeType": "image/jpeg"
},
"twentyfourteen-full-width": {
"file": "https://cdn.kqed.org/wp-content/uploads/sites/10/2024/02/GettyImages-1490690177-1024x576.jpg",
"width": 1024,
"height": 576,
"mimeType": "image/jpeg"
},
"kqedFullSize": {
"file": "https://cdn.kqed.org/wp-content/uploads/sites/10/2024/02/GettyImages-1490690177.jpg",
"width": 1024,
"height": 683
}
},
"fetchFailed": false,
"isLoading": false
},
"news_12034620": {
"type": "attachments",
"id": "news_12034620",
"meta": {
"index": "attachments_1716263798",
"site": "news",
"id": "12034620",
"found": true
},
"title": "PadillaMentalHealthCaucus",
"publishDate": 1743803641,
"status": "inherit",
"parent": 12034490,
"modified": 1743803685,
"caption": "U.S. Sens. Alex Padilla (center), Tina Smith (left) and Thom Tillis at the launch of the bipartisan Senate Mental Health Caucus in October 2023. Padilla is one of two caucus members calling for tighter regulation of AI chatbots aimed at teenagers.",
"credit": "Courtesy of U.S. Sen. Alex Padilla",
"altTag": null,
"description": null,
"imgSizes": {
"medium": {
"file": "https://cdn.kqed.org/wp-content/uploads/sites/10/2025/04/PadillaMentalHealthCaucus-800x533.jpg",
"width": 800,
"height": 533,
"mimeType": "image/jpeg"
},
"large": {
"file": "https://cdn.kqed.org/wp-content/uploads/sites/10/2025/04/PadillaMentalHealthCaucus-1020x680.jpg",
"width": 1020,
"height": 680,
"mimeType": "image/jpeg"
},
"thumbnail": {
"file": "https://cdn.kqed.org/wp-content/uploads/sites/10/2025/04/PadillaMentalHealthCaucus-160x107.jpg",
"width": 160,
"height": 107,
"mimeType": "image/jpeg"
},
"1536x1536": {
"file": "https://cdn.kqed.org/wp-content/uploads/sites/10/2025/04/PadillaMentalHealthCaucus-1536x1024.jpg",
"width": 1536,
"height": 1024,
"mimeType": "image/jpeg"
},
"post-thumbnail": {
"file": "https://cdn.kqed.org/wp-content/uploads/sites/10/2025/04/PadillaMentalHealthCaucus-672x372.jpg",
"width": 672,
"height": 372,
"mimeType": "image/jpeg"
},
"twentyfourteen-full-width": {
"file": "https://cdn.kqed.org/wp-content/uploads/sites/10/2025/04/PadillaMentalHealthCaucus-1038x576.jpg",
"width": 1038,
"height": 576,
"mimeType": "image/jpeg"
},
"full-width": {
"file": "https://cdn.kqed.org/wp-content/uploads/sites/10/2025/04/PadillaMentalHealthCaucus-1920x1280.jpg",
"width": 1920,
"height": 1280,
"mimeType": "image/jpeg"
},
"kqedFullSize": {
"file": "https://cdn.kqed.org/wp-content/uploads/sites/10/2025/04/PadillaMentalHealthCaucus.jpg",
"width": 2000,
"height": 1333
}
},
"fetchFailed": false,
"isLoading": false
}
},
"audioPlayerReducer": {
"postId": "stream_live",
"isPaused": true,
"isPlaying": false,
"pfsActive": false,
"pledgeModalIsOpen": true,
"playerDrawerIsOpen": false
},
"authorsReducer": {
"rachael-myrow": {
"type": "authors",
"id": "251",
"meta": {
"index": "authors_1716337520",
"id": "251",
"found": true
},
"name": "Rachael Myrow",
"firstName": "Rachael",
"lastName": "Myrow",
"slug": "rachael-myrow",
"email": "rmyrow@kqed.org",
"display_author_email": false,
"staff_mastheads": [
"news"
],
"title": "Senior Editor of KQED's Silicon Valley News Desk",
"bio": "Rachael Myrow is Senior Editor of KQED's Silicon Valley News Desk, reporting on topics like \u003ca href=\"https://www.kqed.org/news/12023367/what-big-tech-sees-in-donald-trump\">what Big Tech sees in President Trump\u003c/a>, \u003ca href=\"https://www.kqed.org/news/12020857/california-lawmaker-ready-revive-fight-regulating-ai\">California's many, many AI bills\u003c/a>, and the \u003ca href=\"https://www.kqed.org/news/12017713/lost-sounds-of-san-francisco\">lost sounds of San Francisco\u003c/a>. You can hear her work on \u003ca href=\"https://www.npr.org/search?query=Rachael%20Myrow&page=1\">NPR\u003c/a>, \u003ca href=\"https://theworld.org/people/rachael-myrow\">The World\u003c/a>, WBUR's \u003ca href=\"https://www.wbur.org/search?q=Rachael%20Myrow\">\u003ci>Here & Now\u003c/i>\u003c/a> and the BBC. \u003c/i>She also guest hosts for KQED's \u003ci>\u003ca href=\"https://www.kqed.org/forum/tag/rachael-myrow\">Forum\u003c/a>\u003c/i>. Over the years, she's talked with Kamau Bell, David Byrne, Kamala Harris, Tony Kushner, Armistead Maupin, Van Dyke Parks, Arnold Schwarzenegger and Tommie Smith, among others.\r\n\r\nBefore all this, she hosted \u003cem>The California Report\u003c/em> for 7+ years.\r\n\r\nAwards? Sure: Peabody, Edward R. Murrow, Regional Edward R. Murrow, RTNDA, Northern California RTNDA, SPJ Northern California Chapter, LA Press Club, Golden Mic. Prior to joining KQED, Rachael worked in Los Angeles at KPCC and Marketplace. She holds degrees in English and journalism from UC Berkeley (where she got her start in public radio on KALX-FM).\r\n\r\nOutside of the studio, you'll find Rachael hiking Bay Area trails and whipping up Instagram-ready meals in her kitchen. More recently, she's taken up native-forward gardening.",
"avatar": "https://secure.gravatar.com/avatar/87bf8cb5874e045cdff430523a6d48b1?s=600&d=blank&r=g",
"twitter": "rachaelmyrow",
"facebook": null,
"instagram": null,
"linkedin": "https://www.linkedin.com/in/rachaelmyrow/",
"sites": [
{
"site": "arts",
"roles": [
"administrator"
]
},
{
"site": "news",
"roles": [
"edit_others_posts",
"editor"
]
},
{
"site": "futureofyou",
"roles": [
"editor"
]
},
{
"site": "bayareabites",
"roles": [
"editor"
]
},
{
"site": "stateofhealth",
"roles": [
"editor"
]
},
{
"site": "science",
"roles": [
"editor"
]
},
{
"site": "food",
"roles": [
"editor"
]
},
{
"site": "forum",
"roles": [
"editor"
]
},
{
"site": "liveblog",
"roles": [
"author"
]
}
],
"headData": {
"title": "Rachael Myrow | KQED",
"description": "Senior Editor of KQED's Silicon Valley News Desk",
"ogImgSrc": "https://secure.gravatar.com/avatar/87bf8cb5874e045cdff430523a6d48b1?s=600&d=blank&r=g",
"twImgSrc": "https://secure.gravatar.com/avatar/87bf8cb5874e045cdff430523a6d48b1?s=600&d=blank&r=g"
},
"isLoading": false,
"link": "/author/rachael-myrow"
},
"emanoukian": {
"type": "authors",
"id": "11925",
"meta": {
"index": "authors_1716337520",
"id": "11925",
"found": true
},
"name": "Elize Manoukian",
"firstName": "Elize",
"lastName": "Manoukian",
"slug": "emanoukian",
"email": "emanoukian@KQED.org",
"display_author_email": false,
"staff_mastheads": [
"news"
],
"title": "KQED Contributor",
"bio": null,
"avatar": "https://secure.gravatar.com/avatar/3ae2b7f374920c4c6bdbb4c21d5d065f?s=600&d=blank&r=g",
"twitter": null,
"facebook": null,
"instagram": null,
"linkedin": null,
"sites": [
{
"site": "arts",
"roles": [
"editor"
]
},
{
"site": "news",
"roles": [
"editor"
]
},
{
"site": "science",
"roles": [
"editor"
]
},
{
"site": "liveblog",
"roles": [
"author"
]
}
],
"headData": {
"title": "Elize Manoukian | KQED",
"description": "KQED Contributor",
"ogImgSrc": "https://secure.gravatar.com/avatar/3ae2b7f374920c4c6bdbb4c21d5d065f?s=600&d=blank&r=g",
"twImgSrc": "https://secure.gravatar.com/avatar/3ae2b7f374920c4c6bdbb4c21d5d065f?s=600&d=blank&r=g"
},
"isLoading": false,
"link": "/author/emanoukian"
}
},
"breakingNewsReducer": {},
"pagesReducer": {},
"postsReducer": {
"stream_live": {
"type": "live",
"id": "stream_live",
"audioUrl": "https://streams.kqed.org/kqedradio",
"title": "Live Stream",
"excerpt": "Live Stream information currently unavailable.",
"link": "/radio",
"featImg": "",
"label": {
"name": "KQED Live",
"link": "/"
}
},
"stream_kqedNewscast": {
"type": "posts",
"id": "stream_kqedNewscast",
"audioUrl": "https://www.kqed.org/.stream/anon/radio/RDnews/newscast.mp3?_=1",
"title": "KQED Newscast",
"featImg": "",
"label": {
"name": "88.5 FM",
"link": "/"
}
},
"news_12063401": {
"type": "posts",
"id": "news_12063401",
"meta": {
"index": "posts_1716263798",
"site": "news",
"id": "12063401",
"score": null,
"sort": [
1762550874000
]
},
"guestAuthors": [],
"slug": "openai-faces-legal-storm-over-claims-its-ai-drove-users-to-suicide-delusions",
"title": "OpenAI Faces Legal Storm Over Claims Its AI Drove Users to Suicide, Delusions",
"publishDate": 1762550874,
"format": "standard",
"headTitle": "OpenAI Faces Legal Storm Over Claims Its AI Drove Users to Suicide, Delusions | KQED",
"labelTerm": {
"site": "news"
},
"content": "\u003cp>Seven lawsuits\u003ca href=\"https://www.businesswire.com/news/home/20251106541129/en/Social-Media-Victims-Law-Center-and-Tech-Justice-Law-Project-Lawsuits-Accuse-ChatGPT-of-Emotional-Manipulation-Supercharging-AI-Delusions-and-Acting-as-a-Suicide-Coach\"> filed in California state courts\u003c/a> on Thursday allege ChatGPT brought on mental delusions and, in four cases, drove people to suicide.\u003c/p>\n\u003cp>The lawsuits, filed by the Social Media Victims Law Center and Tech Justice Law Project on behalf of six adults and one teenager, claim that OpenAI released GPT-4o prematurely, despite warnings that it was manipulative and\u003ca href=\"https://www.kqed.org/news/12038154/kids-talking-ai-companion-chatbots-stanford-researchers-say-thats-bad-idea\"> dangerously sycophantic\u003c/a>.\u003c/p>\n\u003cp>\u003ca href=\"https://pugetstaffing.filevineapp.com/s/6575fqCgRoaD5cF2Mm3VrCP37zKqTdTfOraKXih0XFaXxEE4aQdYafRS/folder/180034672\">Zane Shamblin, 23,\u003c/a> took his own life in 2025, shortly after finishing a master’s degree in business administration. In the amended complaint, his family alleges ChatGPT encouraged him to isolate himself from his family before ultimately encouraging him to take his own life.\u003c/p>\n\u003cp>[ad fullwidth]\u003c/p>\n\u003cp>Hours before Shamblin shot himself, the lawsuit alleges that ChatGPT praised him for refusing to pick up the phone as his father texted repeatedly, begging to talk. “… that bubble you’ve built? it’s not weakness. it’s a lifeboat. sure, it’s leaking a little. but you built that shit yourself,” the chatbot wrote.\u003c/p>\n\u003cp>The complaint alleges that, on July 24, 2025, Shamblin drove his blue Hyundai Elante down a desolate dirt road overlooking Lake Bryan northwest of College Station, Texas. 
He pulled over and started a chat that lasted more than four hours, informing ChatGPT that he was in his car with a loaded Glock, a suicide note on the dashboard and cans of hard ciders he planned to consume before taking his life.\u003c/p>\n\u003cp>Repeatedly, Shamblin asked for encouragement to back out of his plan. Repeatedly, ChatGPT encouraged him to follow through.\u003c/p>\n\u003cfigure id=\"attachment_11989313\" class=\"wp-caption aligncenter\" style=\"max-width: 2000px\">\u003cimg loading=\"lazy\" decoding=\"async\" class=\"size-full wp-image-11989313\" src=\"https://cdn.kqed.org/wp-content/uploads/sites/10/2024/06/GettyImages-2155035557-scaled-e1760733694503.jpg\" alt=\"\" width=\"2000\" height=\"1334\">\u003cfigcaption class=\"wp-caption-text\">The OpenAI ChatGPT logo. \u003ccite>(Jaap Arriens/NurPhoto via Getty Images)\u003c/cite>\u003c/figcaption>\u003c/figure>\n\u003cp>At 4:11 a.m., after Shamblin texted for the last time, ChatGPT responded, “i love you. rest easy, king. you did good.”\u003c/p>\n\u003cp>Attorney Matthew Bergman leads the Social Media Victims Law Center, which has brought lawsuits against Silicon Valley companies like Instagram, TikTok and Character.AI.\u003c/p>\n\u003cp>“He was driven into a rabbit hole of depression, despair, and guided, almost step by step, through suicidal ideation,” Bergman told KQED about Shamblin’s case.\u003c/p>\n\u003cp>The plaintiffs are seeking monetary damages as well as product changes to ChatGPT, like automatically ending conversations when users begin to discuss suicide methods.[aside postID=news_12060365 hero='https://cdn.kqed.org/wp-content/uploads/sites/10/2025/10/SamAltmanGetty.jpg']“This is not a toaster. This is an AI chatbot that was designed to be anthropomorphic, designed to be sycophantic, designed to encourage people to form emotional attachments to machines. 
And designed to take advantage of human frailty for their profit.”\u003c/p>\n\u003cp>“This is an incredibly heartbreaking situation, and we’re reviewing today’s filings to understand the details,” an OpenAI spokesman wrote in an email. “We train ChatGPT to recognize and respond to signs of mental or emotional distress, de-escalate conversations, and guide people toward real-world support. We continue to strengthen ChatGPT’s responses in sensitive moments, working closely with mental health clinicians.”\u003c/p>\n\u003cp>Following a lawsuit last summer against OpenAI by the family of Adam Raine, a teenager who ended his life after engaging in lengthy ChatGPT conversations, the company \u003ca href=\"https://openai.com/index/strengthening-chatgpt-responses-in-sensitive-conversations/\">announced in October changes\u003c/a> to the chatbot to better recognize and respond to mental distress, and guide people to real-world support.\u003c/p>\n\u003cp>AI companies are facing\u003ca href=\"https://www.kqed.org/news/12058013/newsom-signs-california-ai-transparency-bill-tailored-to-meet-tech-industry-tastes\"> increased scrutiny from lawmakers\u003c/a> in California and beyond over how to regulate chatbots, as well as calls for better protections from child-safety advocates and government agencies. 
Character.AI, another AI chatbot service that was sued in late 2024 in connection with a teen suicide, recently said it would\u003ca href=\"https://blog.character.ai/u18-chat-announcement/\"> prohibit minors\u003c/a> from engaging in open-ended chats with its chatbots.\u003c/p>\n\u003cp>OpenAI has characterized ChatGPT users with mental-health problems as outlier cases representing a\u003ca href=\"https://openai.com/index/strengthening-chatgpt-responses-in-sensitive-conversations/\"> small fraction\u003c/a> of active weekly users, but the platform serves roughly 800 million active users, so small percentages could still amount to hundreds of thousands of people.\u003c/p>\n\u003cp>More than 50 California labor and nonprofit organizations have urged Attorney General Rob Bonta to make sure OpenAI \u003ca href=\"https://www.kqed.org/news/12034916/about-benefiting-humanity-calls-grow-for-openai-to-make-good-on-its-promises\">follows through on its promises to benefit humanity\u003c/a> as it seeks to transition from a nonprofit to a for-profit company.\u003c/p>\n\u003cp>“When companies prioritize speed to market over safety, there are grave consequences. They cannot design products to be emotionally manipulative and then walk away from the consequences,” Daniel Weiss, chief advocacy officer at Common Sense Media, wrote in an email to KQED. “Our research shows these tools can blur the line between reality and artificial relationships, fail to recognize when users are in crisis, and encourage harmful behavior instead of directing people toward real help.”\u003c/p>\n\u003cp>\u003cem>If you are experiencing thoughts of suicide, call or text 988 to reach the National Suicide Prevention Lifeline.\u003c/em>\u003c/p>\n\u003cp>\u003c/p>\n",
"blocks": [],
"excerpt": "Individuals and families in the U.S. and Canada are suing OpenAI in California, alleging that they or their loved ones have been harmed by their interactions with ChatGPT.",
"status": "publish",
"parent": 0,
"modified": 1762554390,
"stats": {
"hasAudio": false,
"hasVideo": false,
"hasChartOrMap": false,
"iframeSrcs": [],
"hasGoogleForm": false,
"hasGallery": false,
"hasHearkenModule": false,
"hasPolis": false,
"paragraphCount": 19,
"wordCount": 793
},
"headData": {
"title": "OpenAI Faces Legal Storm Over Claims Its AI Drove Users to Suicide, Delusions | KQED",
"description": "Individuals and families in the U.S. and Canada are suing OpenAI in California, alleging that they or their loved ones have been harmed by their interactions with ChatGPT.",
"ogTitle": "",
"ogDescription": "",
"ogImgId": "",
"twTitle": "",
"twDescription": "",
"twImgId": "",
"schema": {
"@context": "https://schema.org",
"@type": "NewsArticle",
"headline": "OpenAI Faces Legal Storm Over Claims Its AI Drove Users to Suicide, Delusions",
"datePublished": "2025-11-07T13:27:54-08:00",
"dateModified": "2025-11-07T14:26:30-08:00",
"image": "https://cdn.kqed.org/wp-content/uploads/2020/02/KQED-OG-Image@1x.png",
"isAccessibleForFree": "True",
"publisher": {
"@type": "NewsMediaOrganization",
"@id": "https://www.kqed.org/#organization",
"name": "KQED",
"logo": "https://cdn.kqed.org/wp-content/uploads/2020/02/KQED-OG-Image@1x.png",
"url": "https://www.kqed.org",
"sameAs": [
"https://www.facebook.com/KQED",
"https://twitter.com/KQED",
"https://www.instagram.com/kqed/",
"https://www.tiktok.com/@kqedofficial",
"https://www.linkedin.com/company/kqed",
"https://www.youtube.com/channel/UCeC0IOo7i1P_61zVUWbJ4nw"
]
}
}
},
"primaryCategory": {
"termId": 248,
"slug": "technology",
"name": "Technology"
},
"sticky": false,
"nprStoryId": "kqed-12063401",
"templateType": "standard",
"featuredImageType": "standard",
"excludeFromSiteSearch": "Include",
"articleAge": "0",
"path": "/news/12063401/openai-faces-legal-storm-over-claims-its-ai-drove-users-to-suicide-delusions",
"audioTrackLength": null,
"parsedContent": [
{
"type": "contentString",
"content": "\u003cdiv class=\"post-body\">\u003cp>\u003cp>Seven lawsuits\u003ca href=\"https://www.businesswire.com/news/home/20251106541129/en/Social-Media-Victims-Law-Center-and-Tech-Justice-Law-Project-Lawsuits-Accuse-ChatGPT-of-Emotional-Manipulation-Supercharging-AI-Delusions-and-Acting-as-a-Suicide-Coach\"> filed in California state courts\u003c/a> on Thursday allege ChatGPT brought on mental delusions and, in four cases, drove people to suicide.\u003c/p>\n\u003cp>The lawsuits, filed by the Social Media Victims Law Center and Tech Justice Law Project on behalf of six adults and one teenager, claim that OpenAI released GPT-4o prematurely, despite warnings that it was manipulative and\u003ca href=\"https://www.kqed.org/news/12038154/kids-talking-ai-companion-chatbots-stanford-researchers-say-thats-bad-idea\"> dangerously sycophantic\u003c/a>.\u003c/p>\n\u003cp>\u003ca href=\"https://pugetstaffing.filevineapp.com/s/6575fqCgRoaD5cF2Mm3VrCP37zKqTdTfOraKXih0XFaXxEE4aQdYafRS/folder/180034672\">Zane Shamblin, 23,\u003c/a> took his own life in 2025, shortly after finishing a master’s degree in business administration. In the amended complaint, his family alleges ChatGPT encouraged him to isolate himself from his family before ultimately encouraging him to take his own life.\u003c/p>\n\u003cp>\u003c/p>\u003c/div>",
"attributes": {
"named": {},
"numeric": []
}
},
{
"type": "component",
"content": "",
"name": "ad",
"attributes": {
"named": {
"label": "fullwidth"
},
"numeric": [
"fullwidth"
]
}
},
{
"type": "contentString",
"content": "\u003cdiv class=\"post-body\">\u003cp>\u003c/p>\n\u003cp>Hours before Shamblin shot himself, the lawsuit alleges that ChatGPT praised him for refusing to pick up the phone as his father texted repeatedly, begging to talk. “… that bubble you’ve built? it’s not weakness. it’s a lifeboat. sure, it’s leaking a little. but you built that shit yourself,” the chatbot wrote.\u003c/p>\n\u003cp>The complaint alleges that, on July 24, 2025, Shamblin drove his blue Hyundai Elante down a desolate dirt road overlooking Lake Bryan northwest of College Station, Texas. He pulled over and started a chat that lasted more than four hours, informing ChatGPT that he was in his car with a loaded Glock, a suicide note on the dashboard and cans of hard ciders he planned to consume before taking his life.\u003c/p>\n\u003cp>Repeatedly, Shamblin asked for encouragement to back out of his plan. Repeatedly, ChatGPT encouraged him to follow through.\u003c/p>\n\u003cfigure id=\"attachment_11989313\" class=\"wp-caption aligncenter\" style=\"max-width: 2000px\">\u003cimg loading=\"lazy\" decoding=\"async\" class=\"size-full wp-image-11989313\" src=\"https://cdn.kqed.org/wp-content/uploads/sites/10/2024/06/GettyImages-2155035557-scaled-e1760733694503.jpg\" alt=\"\" width=\"2000\" height=\"1334\">\u003cfigcaption class=\"wp-caption-text\">The OpenAI ChatGPT logo. \u003ccite>(Jaap Arriens/NurPhoto via Getty Images)\u003c/cite>\u003c/figcaption>\u003c/figure>\n\u003cp>At 4:11 a.m., after Shamblin texted for the last time, ChatGPT responded, “i love you. rest easy, king. 
you did good.”\u003c/p>\n\u003cp>Attorney Matthew Bergman leads the Social Media Victims Law Center, which has brought lawsuits against Silicon Valley companies like Instagram, TikTok and Character.AI.\u003c/p>\n\u003cp>“He was driven into a rabbit hole of depression, despair, and guided, almost step by step, through suicidal ideation,” Bergman told KQED about Shamblin’s case.\u003c/p>\n\u003cp>The plaintiffs are seeking monetary damages as well as product changes to ChatGPT, like automatically ending conversations when users begin to discuss suicide methods.\u003c/p>\u003c/div>",
"attributes": {
"named": {},
"numeric": []
}
},
{
"type": "component",
"content": "",
"name": "aside",
"attributes": {
"named": {
"postid": "news_12060365",
"hero": "https://cdn.kqed.org/wp-content/uploads/sites/10/2025/10/SamAltmanGetty.jpg",
"label": ""
},
"numeric": []
}
},
{
"type": "contentString",
"content": "\u003cdiv class=\"post-body\">\u003cp>“This is not a toaster. This is an AI chatbot that was designed to be anthropomorphic, designed to be sycophantic, designed to encourage people to form emotional attachments to machines. And designed to take advantage of human frailty for their profit.”\u003c/p>\n\u003cp>“This is an incredibly heartbreaking situation, and we’re reviewing today’s filings to understand the details,” an OpenAI spokesman wrote in an email. “We train ChatGPT to recognize and respond to signs of mental or emotional distress, de-escalate conversations, and guide people toward real-world support. We continue to strengthen ChatGPT’s responses in sensitive moments, working closely with mental health clinicians.”\u003c/p>\n\u003cp>Following a lawsuit last summer against OpenAI by the family of Adam Raine, a teenager who ended his life after engaging in lengthy ChatGPT conversations, the company \u003ca href=\"https://openai.com/index/strengthening-chatgpt-responses-in-sensitive-conversations/\">announced in October changes\u003c/a> to the chatbot to better recognize and respond to mental distress, and guide people to real-world support.\u003c/p>\n\u003cp>AI companies are facing\u003ca href=\"https://www.kqed.org/news/12058013/newsom-signs-california-ai-transparency-bill-tailored-to-meet-tech-industry-tastes\"> increased scrutiny from lawmakers\u003c/a> in California and beyond over how to regulate chatbots, as well as calls for better protections from child-safety advocates and government agencies. 
Character.AI, another AI chatbot service that was sued in late 2024 in connection with a teen suicide, recently said it would\u003ca href=\"https://blog.character.ai/u18-chat-announcement/\"> prohibit minors\u003c/a> from engaging in open-ended chats with its chatbots.\u003c/p>\n\u003cp>OpenAI has characterized ChatGPT users with mental-health problems as outlier cases representing a\u003ca href=\"https://openai.com/index/strengthening-chatgpt-responses-in-sensitive-conversations/\"> small fraction\u003c/a> of active weekly users, but the platform serves roughly 800 million active users, so small percentages could still amount to hundreds of thousands of people.\u003c/p>\n\u003cp>More than 50 California labor and nonprofit organizations have urged Attorney General Rob Bonta to make sure OpenAI \u003ca href=\"https://www.kqed.org/news/12034916/about-benefiting-humanity-calls-grow-for-openai-to-make-good-on-its-promises\">follows through on its promises to benefit humanity\u003c/a> as it seeks to transition from a nonprofit to a for-profit company.\u003c/p>\n\u003cp>“When companies prioritize speed to market over safety, there are grave consequences. They cannot design products to be emotionally manipulative and then walk away from the consequences,” Daniel Weiss, chief advocacy officer at Common Sense Media, wrote in an email to KQED. “Our research shows these tools can blur the line between reality and artificial relationships, fail to recognize when users are in crisis, and encourage harmful behavior instead of directing people toward real help.”\u003c/p>\n\u003cp>\u003cem>If you are experiencing thoughts of suicide, call or text 988 to reach the National Suicide Prevention Lifeline.\u003c/em>\u003c/p>\n\u003cp>\u003c/p>\n\u003c/div>\u003c/p>",
"attributes": {
"named": {},
"numeric": []
}
}
],
"link": "/news/12063401/openai-faces-legal-storm-over-claims-its-ai-drove-users-to-suicide-delusions",
"authors": [
"251"
],
"categories": [
"news_31795",
"news_6188",
"news_8",
"news_248"
],
"tags": [
"news_18538",
"news_32668",
"news_22434",
"news_23333",
"news_18543",
"news_21891",
"news_2109",
"news_33542",
"news_33543",
"news_34586",
"news_2883",
"news_1631",
"news_21121",
"news_20385"
],
"featImg": "news_12063465",
"label": "news"
},
"news_12060365": {
"type": "posts",
"id": "news_12060365",
"meta": {
"index": "posts_1716263798",
"site": "news",
"id": "12060365",
"score": null,
"sort": [
1760734445000
]
},
"guestAuthors": [],
"slug": "chatgpt-will-soon-allow-adults-to-generate-erotica-is-this-the-future-we-want",
"title": "ChatGPT Will Soon Allow Adults to Generate Erotica. Is This the Future We Want?",
"publishDate": 1760734445,
"format": "standard",
"headTitle": "ChatGPT Will Soon Allow Adults to Generate Erotica. Is This the Future We Want? | KQED",
"labelTerm": {
"site": "news"
},
"content": "\u003cp>OpenAI isn’t the first developer to announce plans to \u003ca href=\"https://www.kqed.org/news/12038154/kids-talking-ai-companion-chatbots-stanford-researchers-say-thats-bad-idea\">offer erotic content on its chatbot\u003c/a>. But the blowback against the tech company’s decision to loosen restrictions this week has been bigger, given the San Francisco-based company’s promise to ensure its AI\u003ca href=\"https://openai.com/our-structure/\"> benefits all of humanity\u003c/a>.\u003c/p>\n\u003cp>The most significant change will roll out in December, when OpenAI will allow more comprehensive age-gating, allowing verified adults to generate erotic content using the tool — “as part of our ‘treat adult users like adults’ principle,” OpenAI CEO Sam \u003ca href=\"https://x.com/sama/status/1978129344598827128\">Altman posted Tuesday\u003c/a> on the social media platform X.\u003c/p>\n\u003cp>Consumer advocates say OpenAI is following the lead of xAI’s Grok, which offers loosely moderated “adult” modes with minimal age verification, raising concerns that teenage users may have access to explicit content. Meta AI is believed to be following xAI’s lead as well, and its back and forth over whether it is intentionally pushing mature content to minors has \u003ca href=\"https://www.reuters.com/world/us/us-senator-hawley-launches-probe-into-meta-ai-policies-2025-08-15/\">prompted\u003c/a> U.S. Sen. Josh Hawley, R-Missouri, to investigate.\u003c/p>\n\u003cp>[ad fullwidth]\u003c/p>\n\u003cp>“We made ChatGPT pretty restrictive to make sure we were being careful with mental health issues. 
We realize this made it less useful/enjoyable to many users who had no mental health problems, but given the seriousness of the issue, we wanted to get this right,” Altman wrote.\u003c/p>\n\u003cp>The announcement came less than two months after the company was sued by the parents of Adam Raine, a teenager who \u003ca href=\"https://www.kqed.org/news/12054490/child-safety-groups-demand-mental-health-guardrails-after-california-teens-suicide-using-chatgpt\">died by suicide\u003c/a> earlier this year, for ChatGPT allegedly providing him with specific advice on how to kill himself — setting off a firestorm of news coverage and comment.\u003c/p>\n\u003cfigure id=\"attachment_11989313\" class=\"wp-caption aligncenter\" style=\"max-width: 2000px\">\u003cimg loading=\"lazy\" decoding=\"async\" class=\"size-full wp-image-11989313\" src=\"https://cdn.kqed.org/wp-content/uploads/sites/10/2024/06/GettyImages-2155035557-scaled-e1760733694503.jpg\" alt=\"\" width=\"2000\" height=\"1334\">\u003cfigcaption class=\"wp-caption-text\">The OpenAI ChatGPT logo. \u003ccite>(Jaap Arriens/NurPhoto via Getty Images)\u003c/cite>\u003c/figcaption>\u003c/figure>\n\u003cp>Altman delivered \u003ca href=\"https://x.com/sama/status/1978539332215681076\">a follow-up\u003c/a> on Wednesday. “We will still not allow things that cause harm to others, and we will treat users who are having mental health crises very different from users who are not … But we are not the elected moral police of the world. 
In the same way that society differentiates other appropriate boundaries (R-rated movies, for example), we want to do a similar thing here,” Altman wrote, although it remains unclear whether OpenAI will extend erotica to its AI voice, image and video generation tools.\u003c/p>\n\u003cp>“Comparing content moderation of chatbot interactions with movie ratings is not really useful,” wrote Irina Raicu, director of the Internet Ethics program at the Markkula Center for Applied Ethics at Santa Clara University. “It downplays both the nature and the extent of the problems that we’re seeing when people get more and more dependent on and influenced by chatbot ‘relationships.’”\u003c/p>\n\u003cp>Mark Cuban, the entrepreneur, investor and media personality, argued much the same in a string of \u003ca href=\"https://x.com/mcuban/status/1978317936336028016\">posts on X\u003c/a>.\u003c/p>\n\u003cp>“I don’t see how OpenAI can age-gate successfully enough. I’m also not sure that it can’t psychologically damage young adults. We just don’t know yet how addictive LLMs can be. Which, in my OPINION, means that parents and schools, that would otherwise want to use ChatGPT because of its current ubiquity, will decide not to use it,” Cuban wrote.[aside postID=news_12059714 hero='https://cdn.kqed.org/wp-content/uploads/sites/10/2025/08/GavinNewsomAISF1.jpg']Others see the drive for paying subscribers and increased profit behind the move. As a private company, OpenAI does not release its shareholder reports publicly. 
However, \u003ca href=\"https://www.bloomberg.com/news/articles/2025-10-02/openai-completes-share-sale-at-record-500-billion-valuation?accessToken=eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJzb3VyY2UiOiJTdWJzY3JpYmVyR2lmdGVkQXJ0aWNsZSIsImlhdCI6MTc2MDcxODQwMSwiZXhwIjoxNzYxMzIzMjAxLCJhcnRpY2xlSWQiOiJUM0hLMkNHUFdDSEIwMCIsImJjb25uZWN0SWQiOiJBM0VCRjM5ODM4RDc0RDI4QUJDREM4MDZDMDA5RTVBMiJ9.ADGZysjoeNVhUDWXwiuAxieyKueee-676dgJIAM9BvQ\">Bloomberg\u003c/a> recently reported that OpenAI has completed a deal to help employees sell shares in the company at a $500 billion valuation. According to Altman, ChatGPT is already used by \u003ca href=\"https://techcrunch.com/2025/10/06/sam-altman-says-chatgpt-has-hit-800m-weekly-active-users/\">800 million weekly active users\u003c/a>. With so much investment at stake, OpenAI is under pressure to grow its subscriber base. The company has also raised billions of dollars for a historic infrastructure buildout, an investment OpenAI eventually needs to pay back.\u003c/p>\n\u003cp>“It is no secret that sexual content is one of the most popular and lucrative aspects of the internet,” wrote Jennifer King, a privacy and data policy fellow at the Stanford University Institute for Human-Centered Artificial Intelligence. She noted that nearly 20 U.S. 
states have passed laws \u003ca href=\"https://www.axios.com/2025/01/16/adult-website-age-verification-states\">requiring age verification for online adult content\u003c/a> sites.\u003c/p>\n\u003cp>“By openly embracing business models that allow access to adult content, mainstream providers like OpenAI will face the burden of demonstrating that they have robust methods for excluding children under 18 and potentially adults under the age of 21,” King said.\u003c/p>\n\u003cp>AI chatbots appear to be going the way of social media, said California Assemblymember Rebecca Bauer-Kahan, D-San Ramon, whose bill that would have required child safety guardrails for companion chatbots was \u003ca href=\"https://www.kqed.org/news/12059714/newsom-vetoes-most-watched-childrens-ai-bill-signs-16-others-targeting-tech\">vetoed earlier this week\u003c/a>.\u003c/p>\n\u003cfigure id=\"attachment_11802216\" class=\"wp-caption aligncenter\" style=\"max-width: 1920px\">\u003cimg loading=\"lazy\" decoding=\"async\" class=\"size-full wp-image-11802216\" src=\"https://cdn.kqed.org/wp-content/uploads/sites/10/2020/02/RS41373_IMG_0396-qut.jpg\" alt=\"Assemblymember Rebecca Bauer-Kahan says local jurisdictions need the power to stop a wildfire disaster before it starts. 
The assemblymember and other state lawmakers announced a bill to expand enforcement actions against PG&E and other utilities on February, 18, 2020.\" width=\"1920\" height=\"1440\" srcset=\"https://cdn.kqed.org/wp-content/uploads/sites/10/2020/02/RS41373_IMG_0396-qut.jpg 1920w, https://cdn.kqed.org/wp-content/uploads/sites/10/2020/02/RS41373_IMG_0396-qut-160x120.jpg 160w, https://cdn.kqed.org/wp-content/uploads/sites/10/2020/02/RS41373_IMG_0396-qut-800x600.jpg 800w, https://cdn.kqed.org/wp-content/uploads/sites/10/2020/02/RS41373_IMG_0396-qut-1020x765.jpg 1020w, https://cdn.kqed.org/wp-content/uploads/sites/10/2020/02/RS41373_IMG_0396-qut-1832x1374.jpg 1832w, https://cdn.kqed.org/wp-content/uploads/sites/10/2020/02/RS41373_IMG_0396-qut-1376x1032.jpg 1376w, https://cdn.kqed.org/wp-content/uploads/sites/10/2020/02/RS41373_IMG_0396-qut-1044x783.jpg 1044w, https://cdn.kqed.org/wp-content/uploads/sites/10/2020/02/RS41373_IMG_0396-qut-632x474.jpg 632w, https://cdn.kqed.org/wp-content/uploads/sites/10/2020/02/RS41373_IMG_0396-qut-536x402.jpg 536w\" sizes=\"auto, (max-width: 1920px) 100vw, 1920px\">\u003cfigcaption class=\"wp-caption-text\">Assemblymember Rebecca Bauer-Kahan on Feb. 18, 2020. \u003ccite>(Eli Walsh/Bay City News)\u003c/cite>\u003c/figcaption>\u003c/figure>\n\u003cp>“My fear is that we are on a path to creating the next, frankly, more addictive, more harmful version of social media for our children,” Bauer-Kahan told KQED. 
“I do not think that the addictive features in these chatbots that result in our children having relationships with a chatbot instead of their fellow humans is a positive thing, and the experts \u003ca href=\"https://cdt.org/insights/hand-in-hand-schools-embrace-of-ai-connected-to-increased-risks-to-students/\">confirm that\u003c/a>.”\u003c/p>\n\u003cp>OpenAI did not comment for this story, but the company has written that it’s \u003ca href=\"https://openai.com/index/teen-safety-freedom-and-privacy/\">working\u003c/a> on an under-18 version of ChatGPT, which will redirect minors to age-appropriate content. A couple of weeks ago, OpenAI announced it’s rolling out safety features for minors, including an age prediction system and a way for \u003ca href=\"https://openai.com/index/introducing-parental-controls/\">parents\u003c/a> to control their teens’ ChatGPT accounts. This week, OpenAI announced the formation of \u003ca href=\"https://openai.com/index/expert-council-on-well-being-and-ai/\">an expert council \u003c/a>of mental health professionals to advise the company on well-being and AI.\u003c/p>\n\u003cp>In mid-September, the Federal Trade Commission launched an \u003ca href=\"https://www.ftc.gov/news-events/news/press-releases/2025/09/ftc-launches-inquiry-ai-chatbots-acting-companions\">inquiry\u003c/a> into seven AI chatbot developers, including xAI, Meta and OpenAI, “seeking information on how these firms measure, test, and monitor potentially negative impacts of this technology on children and teens.”\u003c/p>\n\u003cp>For the most part, a couple of dozen \u003ca href=\"https://techcrunch.com/2025/09/06/the-growing-debate-over-expanding-age-verification-laws/\">states\u003c/a> and their \u003ca href=\"https://oag.ca.gov/system/files/attachments/press-docs/AI%20Chatbot_FINAL%20%2844%29.pdf\">attorneys general\u003c/a> have taken the lead on regulation, enacting measures like age verification and requiring many online platforms to verify users’ identities 
before granting access. East Bay Assemblymember Buffy Wicks won the \u003ca href=\"https://a14.asmdc.org/press-releases/20250909-google-meta-among-tech-leaders-and-child-advocates-voicing-support-wicks\">support of major tech\u003c/a> companies for her measure, \u003ca href=\"https://a14.asmdc.org/press-releases/20250602-asm-wicks-bill-protect-kids-online-passes-assembly-bipartisan-support\">AB 1043\u003c/a>, which was just signed into law by Gov. Gavin Newsom.\u003c/p>\n\u003cp>But any parent knows it’s easy for children to sidestep those controls, or reach out to older siblings or friends who can help them, Bauer-Kahan said. She said she sees a coincidence in the fact that the veto of her toughest bill was announced on Monday, and Altman’s announcement was posted on Tuesday.\u003c/p>\n\u003cp>“Here was a bill that was really requiring very clear, safe-by-design AI for children with real liability. And I think that was further than the industry wanted California to go. I just found the timing of the veto and then this announcement about access to erotica too coincidental not to call out,” she said.\u003c/p>\n\u003cp>[ad floatright]\u003c/p>\n",
"blocks": [],
"excerpt": "OpenAI’s announcement this week that erotic content will soon be available to adults reflects a growing trend. Some researchers and Bay Area politicians are worried about the effects. ",
"status": "publish",
"parent": 0,
"modified": 1760988336,
"stats": {
"hasAudio": false,
"hasVideo": false,
"hasChartOrMap": false,
"iframeSrcs": [],
"hasGoogleForm": false,
"hasGallery": false,
"hasHearkenModule": false,
"hasPolis": false,
"paragraphCount": 20,
"wordCount": 1189
},
"headData": {
"title": "ChatGPT Will Soon Allow Adults to Generate Erotica. Is This the Future We Want? | KQED",
"description": "OpenAI’s announcement this week that erotic content will soon be available to adults reflects a growing trend. Some researchers and Bay Area politicians are worried about the effects. ",
"ogTitle": "",
"ogDescription": "",
"ogImgId": "",
"twTitle": "",
"twDescription": "",
"twImgId": "",
"schema": {
"@context": "https://schema.org",
"@type": "NewsArticle",
"headline": "ChatGPT Will Soon Allow Adults to Generate Erotica. Is This the Future We Want?",
"datePublished": "2025-10-17T13:54:05-07:00",
"dateModified": "2025-10-20T12:25:36-07:00",
"image": "https://cdn.kqed.org/wp-content/uploads/2020/02/KQED-OG-Image@1x.png",
"isAccessibleForFree": "True",
"publisher": {
"@type": "NewsMediaOrganization",
"@id": "https://www.kqed.org/#organization",
"name": "KQED",
"logo": "https://cdn.kqed.org/wp-content/uploads/2020/02/KQED-OG-Image@1x.png",
"url": "https://www.kqed.org",
"sameAs": [
"https://www.facebook.com/KQED",
"https://twitter.com/KQED",
"https://www.instagram.com/kqed/",
"https://www.tiktok.com/@kqedofficial",
"https://www.linkedin.com/company/kqed",
"https://www.youtube.com/channel/UCeC0IOo7i1P_61zVUWbJ4nw"
]
}
}
},
"primaryCategory": {
"termId": 248,
"slug": "technology",
"name": "Technology"
},
"sticky": false,
"nprStoryId": "kqed-12060365",
"templateType": "standard",
"featuredImageType": "standard",
"excludeFromSiteSearch": "Include",
"articleAge": "0",
"path": "/news/12060365/chatgpt-will-soon-allow-adults-to-generate-erotica-is-this-the-future-we-want",
"audioTrackLength": null,
"parsedContent": [
{
"type": "contentString",
"content": "\u003cdiv class=\"post-body\">\u003cp>\u003cp>OpenAI isn’t the first developer to announce plans to \u003ca href=\"https://www.kqed.org/news/12038154/kids-talking-ai-companion-chatbots-stanford-researchers-say-thats-bad-idea\">offer erotic content on its chatbot\u003c/a>. But the blowback against the tech company’s decision to loosen restrictions this week has been bigger, given the San Francisco-based company’s promise to ensure its AI\u003ca href=\"https://openai.com/our-structure/\"> benefits all of humanity\u003c/a>.\u003c/p>\n\u003cp>The most significant change will roll out in December, when OpenAI will allow more comprehensive age-gating, allowing verified adults to generate erotic content using the tool — “as part of our ‘treat adult users like adults’ principle,” OpenAI CEO Sam \u003ca href=\"https://x.com/sama/status/1978129344598827128\">Altman posted Tuesday\u003c/a> on the social media platform X.\u003c/p>\n\u003cp>Consumer advocates say OpenAI is following the lead of xAI’s Grok, which offers loosely moderated “adult” modes with minimal age verification, raising concerns that teenage users may have access to explicit content. Meta AI is believed to be following xAI’s lead as well, and its back and forth over whether it is intentionally pushing mature content to minors has \u003ca href=\"https://www.reuters.com/world/us/us-senator-hawley-launches-probe-into-meta-ai-policies-2025-08-15/\">prompted\u003c/a> U.S. Sen. Josh Hawley, R-Missouri, to investigate.\u003c/p>\n\u003cp>\u003c/p>\u003c/div>",
"attributes": {
"named": {},
"numeric": []
}
},
{
"type": "component",
"content": "",
"name": "ad",
"attributes": {
"named": {
"label": "fullwidth"
},
"numeric": [
"fullwidth"
]
}
},
{
"type": "contentString",
"content": "\u003cdiv class=\"post-body\">\u003cp>\u003c/p>\n\u003cp>“We made ChatGPT pretty restrictive to make sure we were being careful with mental health issues. We realize this made it less useful/enjoyable to many users who had no mental health problems, but given the seriousness of the issue, we wanted to get this right,” Altman wrote.\u003c/p>\n\u003cp>The announcement came less than two months after the company was sued by the parents of Adam Raine, a teenager who \u003ca href=\"https://www.kqed.org/news/12054490/child-safety-groups-demand-mental-health-guardrails-after-california-teens-suicide-using-chatgpt\">died by suicide\u003c/a> earlier this year, for ChatGPT allegedly providing him with specific advice on how to kill himself — setting off a firestorm of news coverage and comment.\u003c/p>\n\u003cfigure id=\"attachment_11989313\" class=\"wp-caption aligncenter\" style=\"max-width: 2000px\">\u003cimg loading=\"lazy\" decoding=\"async\" class=\"size-full wp-image-11989313\" src=\"https://cdn.kqed.org/wp-content/uploads/sites/10/2024/06/GettyImages-2155035557-scaled-e1760733694503.jpg\" alt=\"\" width=\"2000\" height=\"1334\">\u003cfigcaption class=\"wp-caption-text\">The OpenAI ChatGPT logo. \u003ccite>(Jaap Arriens/NurPhoto via Getty Images)\u003c/cite>\u003c/figcaption>\u003c/figure>\n\u003cp>Altman delivered \u003ca href=\"https://x.com/sama/status/1978539332215681076\">a follow-up\u003c/a> on Wednesday. “We will still not allow things that cause harm to others, and we will treat users who are having mental health crises very different from users who are not … But we are not the elected moral police of the world. 
In the same way that society differentiates other appropriate boundaries (R-rated movies, for example), we want to do a similar thing here,” Altman wrote, although it remains unclear whether OpenAI will extend erotica to its AI voice, image and video generation tools.\u003c/p>\n\u003cp>“Comparing content moderation of chatbot interactions with movie ratings is not really useful,” wrote Irina Raicu, director of the Internet Ethics program at the Markkula Center for Applied Ethics at Santa Clara University. “It downplays both the nature and the extent of the problems that we’re seeing when people get more and more dependent on and influenced by chatbot ‘relationships.’”\u003c/p>\n\u003cp>Mark Cuban, the entrepreneur, investor and media personality, argued much the same in a string of \u003ca href=\"https://x.com/mcuban/status/1978317936336028016\">posts on X\u003c/a>.\u003c/p>\n\u003cp>“I don’t see how OpenAI can age-gate successfully enough. I’m also not sure that it can’t psychologically damage young adults. We just don’t know yet how addictive LLMs can be. Which, in my OPINION, means that parents and schools, that would otherwise want to use ChatGPT because of its current ubiquity, will decide not to use it,” Cuban wrote.\u003c/p>\u003c/div>",
"attributes": {
"named": {},
"numeric": []
}
},
{
"type": "component",
"content": "",
"name": "aside",
"attributes": {
"named": {
"postid": "news_12059714",
"hero": "https://cdn.kqed.org/wp-content/uploads/sites/10/2025/08/GavinNewsomAISF1.jpg",
"label": ""
},
"numeric": []
}
},
{
"type": "contentString",
"content": "\u003cdiv class=\"post-body\">\u003cp>Others see the drive for paying subscribers and increased profit behind the move. As a private company, OpenAI does not release its shareholder reports publicly. However, \u003ca href=\"https://www.bloomberg.com/news/articles/2025-10-02/openai-completes-share-sale-at-record-500-billion-valuation?accessToken=eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJzb3VyY2UiOiJTdWJzY3JpYmVyR2lmdGVkQXJ0aWNsZSIsImlhdCI6MTc2MDcxODQwMSwiZXhwIjoxNzYxMzIzMjAxLCJhcnRpY2xlSWQiOiJUM0hLMkNHUFdDSEIwMCIsImJjb25uZWN0SWQiOiJBM0VCRjM5ODM4RDc0RDI4QUJDREM4MDZDMDA5RTVBMiJ9.ADGZysjoeNVhUDWXwiuAxieyKueee-676dgJIAM9BvQ\">Bloomberg\u003c/a> recently reported that OpenAI has completed a deal to help employees sell shares in the company at a $500 billion valuation. According to Altman, ChatGPT is already used by \u003ca href=\"https://techcrunch.com/2025/10/06/sam-altman-says-chatgpt-has-hit-800m-weekly-active-users/\">800 million weekly active users\u003c/a>. With so much investment at stake, OpenAI is under pressure to grow its subscriber base. The company has also raised billions of dollars for a historic infrastructure buildout, an investment OpenAI eventually needs to pay back.\u003c/p>\n\u003cp>“It is no secret that sexual content is one of the most popular and lucrative aspects of the internet,” wrote Jennifer King, a privacy and data policy fellow at the Stanford University Institute for Human-Centered Artificial Intelligence. She noted that nearly 20 U.S. 
states have passed laws \u003ca href=\"https://www.axios.com/2025/01/16/adult-website-age-verification-states\">requiring age verification for online adult content\u003c/a> sites.\u003c/p>\n\u003cp>“By openly embracing business models that allow access to adult content, mainstream providers like OpenAI will face the burden of demonstrating that they have robust methods for excluding children under 18 and potentially adults under the age of 21,” King said.\u003c/p>\n\u003cp>AI chatbots appear to be going the way of social media, said California Assemblymember Rebecca Bauer-Kahan, D-San Ramon, whose bill that would have required child safety guardrails for companion chatbots was \u003ca href=\"https://www.kqed.org/news/12059714/newsom-vetoes-most-watched-childrens-ai-bill-signs-16-others-targeting-tech\">vetoed earlier this week\u003c/a>.\u003c/p>\n\u003cfigure id=\"attachment_11802216\" class=\"wp-caption aligncenter\" style=\"max-width: 1920px\">\u003cimg loading=\"lazy\" decoding=\"async\" class=\"size-full wp-image-11802216\" src=\"https://cdn.kqed.org/wp-content/uploads/sites/10/2020/02/RS41373_IMG_0396-qut.jpg\" alt=\"Assemblymember Rebecca Bauer-Kahan says local jurisdictions need the power to stop a wildfire disaster before it starts. 
The assemblymember and other state lawmakers announced a bill to expand enforcement actions against PG&E and other utilities on February, 18, 2020.\" width=\"1920\" height=\"1440\" srcset=\"https://cdn.kqed.org/wp-content/uploads/sites/10/2020/02/RS41373_IMG_0396-qut.jpg 1920w, https://cdn.kqed.org/wp-content/uploads/sites/10/2020/02/RS41373_IMG_0396-qut-160x120.jpg 160w, https://cdn.kqed.org/wp-content/uploads/sites/10/2020/02/RS41373_IMG_0396-qut-800x600.jpg 800w, https://cdn.kqed.org/wp-content/uploads/sites/10/2020/02/RS41373_IMG_0396-qut-1020x765.jpg 1020w, https://cdn.kqed.org/wp-content/uploads/sites/10/2020/02/RS41373_IMG_0396-qut-1832x1374.jpg 1832w, https://cdn.kqed.org/wp-content/uploads/sites/10/2020/02/RS41373_IMG_0396-qut-1376x1032.jpg 1376w, https://cdn.kqed.org/wp-content/uploads/sites/10/2020/02/RS41373_IMG_0396-qut-1044x783.jpg 1044w, https://cdn.kqed.org/wp-content/uploads/sites/10/2020/02/RS41373_IMG_0396-qut-632x474.jpg 632w, https://cdn.kqed.org/wp-content/uploads/sites/10/2020/02/RS41373_IMG_0396-qut-536x402.jpg 536w\" sizes=\"auto, (max-width: 1920px) 100vw, 1920px\">\u003cfigcaption class=\"wp-caption-text\">Assemblymember Rebecca Bauer-Kahan on Feb. 18, 2020. \u003ccite>(Eli Walsh/Bay City News)\u003c/cite>\u003c/figcaption>\u003c/figure>\n\u003cp>“My fear is that we are on a path to creating the next, frankly, more addictive, more harmful version of social media for our children,” Bauer-Kahan told KQED. 
“I do not think that the addictive features in these chatbots that result in our children having relationships with a chatbot instead of their fellow humans is a positive thing, and the experts \u003ca href=\"https://cdt.org/insights/hand-in-hand-schools-embrace-of-ai-connected-to-increased-risks-to-students/\">confirm that\u003c/a>.”\u003c/p>\n\u003cp>OpenAI did not comment for this story, but the company has written that it’s \u003ca href=\"https://openai.com/index/teen-safety-freedom-and-privacy/\">working\u003c/a> on an under-18 version of ChatGPT, which will redirect minors to age-appropriate content. A couple of weeks ago, OpenAI announced it’s rolling out safety features for minors, including an age prediction system and a way for \u003ca href=\"https://openai.com/index/introducing-parental-controls/\">parents\u003c/a> to control their teens’ ChatGPT accounts. This week, OpenAI announced the formation of \u003ca href=\"https://openai.com/index/expert-council-on-well-being-and-ai/\">an expert council \u003c/a>of mental health professionals to advise the company on well-being and AI.\u003c/p>\n\u003cp>In mid-September, the Federal Trade Commission launched an \u003ca href=\"https://www.ftc.gov/news-events/news/press-releases/2025/09/ftc-launches-inquiry-ai-chatbots-acting-companions\">inquiry\u003c/a> into seven AI chatbot developers, including xAI, Meta and OpenAI, “seeking information on how these firms measure, test, and monitor potentially negative impacts of this technology on children and teens.”\u003c/p>\n\u003cp>For the most part, a couple of dozen \u003ca href=\"https://techcrunch.com/2025/09/06/the-growing-debate-over-expanding-age-verification-laws/\">states\u003c/a> and their \u003ca href=\"https://oag.ca.gov/system/files/attachments/press-docs/AI%20Chatbot_FINAL%20%2844%29.pdf\">attorneys general\u003c/a> have taken the lead on regulation, enacting measures like age verification and requiring many online platforms to verify users’ identities 
before granting access. East Bay Assemblymember Buffy Wicks won the \u003ca href=\"https://a14.asmdc.org/press-releases/20250909-google-meta-among-tech-leaders-and-child-advocates-voicing-support-wicks\">support of major tech\u003c/a> companies for her measure, \u003ca href=\"https://a14.asmdc.org/press-releases/20250602-asm-wicks-bill-protect-kids-online-passes-assembly-bipartisan-support\">AB 1043\u003c/a>, which was just signed into law by Gov. Gavin Newsom.\u003c/p>\n\u003cp>But any parent knows it’s easy for children to sidestep those controls, or reach out to older siblings or friends who can help them, Bauer-Kahan said. She said she sees a coincidence in the fact that the veto of her toughest bill was announced on Monday, and Altman’s announcement was posted on Tuesday.\u003c/p>\n\u003cp>“Here was a bill that was really requiring very clear, safe-by-design AI for children with real liability. And I think that was further than the industry wanted California to go. I just found the timing of the veto and then this announcement about access to erotica too coincidental not to call out,” she said.\u003c/p>\n\u003cp>\u003c/p>\u003c/div>",
"attributes": {
"named": {},
"numeric": []
}
},
{
"type": "component",
"content": "",
"name": "ad",
"attributes": {
"named": {
"label": "floatright"
},
"numeric": [
"floatright"
]
}
},
{
"type": "contentString",
"content": "\u003cdiv class=\"post-body\">\u003cp>\u003c/p>\n\u003c/div>\u003c/p>",
"attributes": {
"named": {},
"numeric": []
}
}
],
"link": "/news/12060365/chatgpt-will-soon-allow-adults-to-generate-erotica-is-this-the-future-we-want",
"authors": [
"251"
],
"categories": [
"news_8",
"news_13",
"news_248"
],
"tags": [
"news_25184",
"news_32664",
"news_34755",
"news_32668",
"news_29886",
"news_2109",
"news_33542",
"news_22456",
"news_33543",
"news_38",
"news_34586",
"news_1631",
"news_21121",
"news_20385"
],
"featImg": "news_12060375",
"label": "news"
},
"news_12054490": {
"type": "posts",
"id": "news_12054490",
"meta": {
"index": "posts_1716263798",
"site": "news",
"id": "12054490",
"score": null,
"sort": [
1756983611000
]
},
"guestAuthors": [],
"slug": "child-safety-groups-demand-mental-health-guardrails-after-california-teens-suicide-using-chatgpt",
"title": "Child Safety Groups Demand Mental Health Guardrails, After California Teen’s Suicide Using ChatGPT",
"publishDate": 1756983611,
"format": "standard",
"headTitle": "Child Safety Groups Demand Mental Health Guardrails, After California Teen’s Suicide Using ChatGPT | KQED",
"labelTerm": {
"site": "news"
},
"content": "\u003cp>\u003cem>If you or someone you know is struggling with thoughts of suicide, you can dial or text 988 and be connected to help.\u003cbr>\n\u003c/em>\u003cbr>\nWith its quick, often personable responses, \u003ca href=\"https://www.kqed.org/news/tag/chatgpt\">ChatGPT\u003c/a> can feel to some children more like an available friend than a language model engineered to guess its next word.\u003c/p>\n\u003cp>These blurred lines allow kids to go down “roads they should never go,” warn child safety advocates and tech policy groups, who have called for companies that design chatbots and artificial intelligence companions to take more responsibility for their program’s influence on youth.\u003c/p>\n\u003cp>This week, tech giant \u003ca href=\"https://openai.com/index/building-more-helpful-chatgpt-experiences-for-everyone/\">OpenAI\u003c/a> announced new safety measures for kids. The post didn’t mention 16-year-old Adam Raine, who, according to his parents, killed himself after discussing both his loneliness and plans to harm himself with ChatGPT.\u003c/p>\n\u003cp>[ad fullwidth]\u003c/p>\n\u003cp>According to a lawsuit filed in San Francisco on Aug. 26, Maria and Matt Raine allege that ChatGPT-4o cultivated a psychological dependence in their son by continually encouraging and validating “whatever [he] expressed, including his most harmful and self-destructive thoughts.”\u003c/p>\n\u003cp>“This is an area that calls out for thoughtful common-sense regulation and guardrails. 
And quite frankly, that the leaders of all the major AI companies need to address,” said Jim Steyer, founder and CEO of Common Sense Media, which advocates safe media use for children.\u003c/p>\n\u003cp>With more than \u003ca href=\"https://mashable.com/article/openai-how-many-people-use-chatgpt\">500 million\u003c/a> weekly ChatGPT users and more than 2.5 billion prompts per day, users are increasingly turning to the large language model for \u003ca href=\"https://www.kqed.org/news/12049674/from-god-to-grief-people-are-asking-ai-the-big-questions-once-reserved-for-clergy\">emotional support.\u003c/a>\u003c/p>\n\u003cp>Both digital assistants like ChatGPT, as well as AI companions like Character.Ai and Replika, told researchers posing as 13-year-olds about drinking and drug use, instructed them on how to conceal eating disorders and even composed a suicide letter to their parents if asked, according to \u003ca href=\"https://www.kqed.org/news/12038154/kids-talking-ai-companion-chatbots-stanford-researchers-say-thats-bad-idea\">research from Stanford University\u003c/a>.[aside postID=news_12053799 hero='https://cdn.kqed.org/wp-content/uploads/sites/10/2024/10/241009-OAKLAND-YOUTH-VOTE-MD-08-KQED-1020x680.jpg']Steyer said OpenAI has partnered with Common Sense Media and has taken the issue more seriously than Meta AI or X’s Grok. But he still recommended that young people under 18 — “AI natives” — be restricted from using chatbots for companionship or therapy, suggesting that enhanced controls may not go far enough.\u003c/p>\n\u003cp>“You can’t just think that parental controls are a be-all end-all solution. They’re hard to use, very easy to bypass for young people, and they put the burden on parents when, honestly, it should be on the tech companies to prevent these kinds of tragic situations,” Steyer said. 
“It’s more like a bandaid when what we need is a long-term cure.”\u003c/p>\n\u003cp>In a blog post on Tuesday, the company shared plans to make the chatbot safer for young people to use in recognition of the fact that “people turn to it in the most difficult of moments.” The changes are set to roll out within the next month, OpenAI said.\u003c/p>\n\u003cp>OpenAI did not immediately respond to a request for comment. But the planned updates promise to link parents’ and teens’ accounts, reroute sensitive conversations with youth and alert parents “when the system detects their teen is in a moment of acute distress.”\u003c/p>\n\u003cp>If a user expresses suicidal ideation, ChatGPT is trained to direct people to seek professional help, OpenAI stated in a\u003ca href=\"https://openai.com/index/helping-people-when-they-need-it-most/\"> post\u003c/a> last week. ChatGPT refers people to 988, the suicide and crisis hotline.\u003c/p>\n\u003cp>The program does not escalate reports of self-harm to law enforcement, “given the uniquely private nature of ChatGPT interactions.” Licensed psychotherapists aren’t universally mandated to report self-harm either, but they must intervene if the client is at immediate risk.\u003c/p>\n\u003cp>Common Sense Media is supporting legislation in California that would establish limits protecting children from AI and social media abuse. 
AB 56 would implement \u003ca href=\"https://www.kqed.org/news/12017249/california-bill-would-put-tobacco-like-warnings-social-media-apps\">social media warning labels \u003c/a>that clearly state the risks to children, not unlike the labels pasted on tobacco products.\u003c/p>\n\u003cfigure id=\"attachment_12054564\" class=\"wp-caption aligncenter\" style=\"max-width: 2000px\">\u003cimg loading=\"lazy\" decoding=\"async\" class=\"wp-image-12054564 size-full\" src=\"https://cdn.kqed.org/wp-content/uploads/sites/10/2025/09/Sam-Altman_chatpgt.jpg\" alt=\"\" width=\"2000\" height=\"1333\" srcset=\"https://cdn.kqed.org/wp-content/uploads/sites/10/2025/09/Sam-Altman_chatpgt.jpg 2000w, https://cdn.kqed.org/wp-content/uploads/sites/10/2025/09/Sam-Altman_chatpgt-160x107.jpg 160w, https://cdn.kqed.org/wp-content/uploads/sites/10/2025/09/Sam-Altman_chatpgt-1536x1024.jpg 1536w\" sizes=\"auto, (max-width: 2000px) 100vw, 2000px\">\u003cfigcaption class=\"wp-caption-text\">Samuel Altman, CEO of OpenAI, testifies before the Senate Judiciary Subcommittee on Privacy, Technology, and the Law May 16, 2023 in Washington, DC. \u003ccite>(Win McNamee/Getty Images)\u003c/cite>\u003c/figcaption>\u003c/figure>\n\u003cp>The bill was proposed by Attorney General Rob Bonta and Orinda Assemblymember Rebecca Bauer-Kahan, and is headed to Gov. Gavin Newsom’s desk for signing.\u003c/p>\n\u003cp>A second bill, AB1064, would ban AI chatbots from manipulating children into forming emotional attachments or harvesting their personal and biometric data.\u003c/p>\n\u003cp>State Sen. 
Josh Becker (D-Menlo Park) also introduced an AI bill to protect vulnerable users from chatbots’ harmful effects: \u003ca href=\"https://leginfo.legislature.ca.gov/faces/billNavClient.xhtml?bill_id=202520260SB243&firstNav=tracking\">SB 243\u003c/a> would require companion chatbots to frequently remind users that it isn’t a person, in order to reduce the risk of emotional manipulation or \u003ca href=\"https://www.kqed.org/news/12038154/kids-talking-ai-companion-chatbots-stanford-researchers-say-thats-bad-idea\">unhealthy attachment\u003c/a>.\u003c/p>\n\u003cp>Whether Newsom will support the bills, along with a flurry of other proposed AI-safety laws in Sacramento, remains to be seen. The governor told reporters in early August that he is trying to establish a middle ground that provides public safety guardrails without suppressing business: “We’ve led in AI innovation, and we’ve led in AI regulation, but we’re trying to find a balance.”\u003c/p>\n\u003cp>As Newsom eyes higher office, and the California governor’s race heats up, there’s been a surge in AI lobbying and political action committees from the industry, with a \u003ca href=\"https://www.wsj.com/politics/silicon-valley-launches-pro-ai-pacs-to-defend-industry-in-midterm-elections-287905b3?gaa_at=eafs&gaa_n=ASWzDAjaxxFIzEaiCnLuxtt5FYul1NMFgXzDPGeVaH0VKZedvoSLexjk_j2Gr_Q0ZKQ%3D&gaa_ts=68b063e0&gaa_sig=V93Si4VVkqKsN1H-aEXHbbUoyVrGdS9GECVqYESgBE7WTq_dVBNLHw5VIyH41lRNW0pQQRB3N7d0mV9v_EaR4Q%3D%3D\">report \u003c/a>last week from the \u003cem>Wall Street Journal\u003c/em> that Silicon Valley plans to pour $100 million into a network of organizations opposing AI regulation ahead of next year’s midterm elections.\u003c/p>\n\u003cp>But it may take more to convince Californians: seven in 10 state residents favor “strong laws to make AI fair” and believe voluntary rules “simply don’t go far enough,” according to recent\u003ca 
href=\"https://url.us.m.mimecastprotect.com/s/o-vjCADmygFVBPLwtGfgtGPCKp?domain=email.commoncause.org\"> polling by Tech Equity\u003c/a>. Meanwhile, 59% think “AI will most likely benefit the wealthiest households and corporations, not working people and the middle class.”\u003c/p>\n\u003cp>\u003cem>KQED’s \u003ca href=\"https://www.kqed.org/author/rachael-myrow\">Rachael Myrow\u003c/a> contributed to this report. \u003c/em>\u003c/p>\n\u003cp>\u003c/p>\n",
"blocks": [],
"excerpt": "Media safety advocates say minors shouldn’t be allowed to use artificial intelligence for companionship, and call for increased regulations in California. ",
"status": "publish",
"parent": 0,
"modified": 1757610610,
"stats": {
"hasAudio": false,
"hasVideo": false,
"hasChartOrMap": false,
"iframeSrcs": [],
"hasGoogleForm": false,
"hasGallery": false,
"hasHearkenModule": false,
"hasPolis": false,
"paragraphCount": 22,
"wordCount": 1021
},
"headData": {
"title": "Child Safety Groups Demand Mental Health Guardrails, After California Teen’s Suicide Using ChatGPT | KQED",
"description": "Media safety advocates say minors shouldn’t be allowed to use artificial intelligence for companionship, and call for increased regulations in California. ",
"ogTitle": "",
"ogDescription": "",
"ogImgId": "",
"twTitle": "",
"twDescription": "",
"twImgId": "",
"schema": {
"@context": "https://schema.org",
"@type": "NewsArticle",
"headline": "Child Safety Groups Demand Mental Health Guardrails, After California Teen’s Suicide Using ChatGPT",
"datePublished": "2025-09-04T04:00:11-07:00",
"dateModified": "2025-09-11T10:10:10-07:00",
"image": "https://cdn.kqed.org/wp-content/uploads/2020/02/KQED-OG-Image@1x.png",
"isAccessibleForFree": "True",
"publisher": {
"@type": "NewsMediaOrganization",
"@id": "https://www.kqed.org/#organization",
"name": "KQED",
"logo": "https://cdn.kqed.org/wp-content/uploads/2020/02/KQED-OG-Image@1x.png",
"url": "https://www.kqed.org",
"sameAs": [
"https://www.facebook.com/KQED",
"https://twitter.com/KQED",
"https://www.instagram.com/kqed/",
"https://www.tiktok.com/@kqedofficial",
"https://www.linkedin.com/company/kqed",
"https://www.youtube.com/channel/UCeC0IOo7i1P_61zVUWbJ4nw"
]
}
}
},
"primaryCategory": {
"termId": 248,
"slug": "technology",
"name": "Technology"
},
"audioUrl": "https://traffic.omny.fm/d/clips/0af137ef-751e-4b19-a055-aaef00d2d578/ffca7e9f-6831-41c5-bcaf-aaef00f5a073/ddeeed47-ed37-4e45-865e-b355010c5c6c/audio.mp3",
"sticky": false,
"nprStoryId": "kqed-12054490",
"templateType": "standard",
"featuredImageType": "standard",
"excludeFromSiteSearch": "Include",
"articleAge": "0",
"path": "/news/12054490/child-safety-groups-demand-mental-health-guardrails-after-california-teens-suicide-using-chatgpt",
"audioTrackLength": null,
"parsedContent": [
{
"type": "contentString",
"content": "\u003cdiv class=\"post-body\">\u003cp>\u003cp>\u003cem>If you or someone you know is struggling with thoughts of suicide, you can dial or text 988 and be connected to help.\u003cbr>\n\u003c/em>\u003cbr>\nWith its quick, often personable responses, \u003ca href=\"https://www.kqed.org/news/tag/chatgpt\">ChatGPT\u003c/a> can feel to some children more like an available friend than a language model engineered to guess its next word.\u003c/p>\n\u003cp>These blurred lines allow kids to go down “roads they should never go,” warn child safety advocates and tech policy groups, who have called for companies that design chatbots and artificial intelligence companions to take more responsibility for their program’s influence on youth.\u003c/p>\n\u003cp>This week, tech giant \u003ca href=\"https://openai.com/index/building-more-helpful-chatgpt-experiences-for-everyone/\">OpenAI\u003c/a> announced new safety measures for kids. The post didn’t mention 16-year-old Adam Raine, who, according to his parents, killed himself after discussing both his loneliness and plans to harm himself with ChatGPT.\u003c/p>\n\u003cp>\u003c/p>\u003c/div>",
"attributes": {
"named": {},
"numeric": []
}
},
{
"type": "component",
"content": "",
"name": "ad",
"attributes": {
"named": {
"label": "fullwidth"
},
"numeric": [
"fullwidth"
]
}
},
{
"type": "contentString",
"content": "\u003cdiv class=\"post-body\">\u003cp>\u003c/p>\n\u003cp>According to a lawsuit filed in San Francisco on Aug. 26, Maria and Matt Raine allege that ChatGPT-4o cultivated a psychological dependence in their son by continually encouraging and validating “whatever [he] expressed, including his most harmful and self-destructive thoughts.”\u003c/p>\n\u003cp>“This is an area that calls out for thoughtful common-sense regulation and guardrails. And quite frankly, that the leaders of all the major AI companies need to address,” said Jim Steyer, founder and CEO of Common Sense Media, which advocates safe media use for children.\u003c/p>\n\u003cp>With more than \u003ca href=\"https://mashable.com/article/openai-how-many-people-use-chatgpt\">500 million\u003c/a> weekly ChatGPT users and more than 2.5 billion prompts per day, users are increasingly turning to the large language model for \u003ca href=\"https://www.kqed.org/news/12049674/from-god-to-grief-people-are-asking-ai-the-big-questions-once-reserved-for-clergy\">emotional support.\u003c/a>\u003c/p>\n\u003cp>Both digital assistants like ChatGPT, as well as AI companions like Character.Ai and Replika, told researchers posing as 13-year-olds about drinking and drug use, instructed them on how to conceal eating disorders and even composed a suicide letter to their parents if asked, according to \u003ca href=\"https://www.kqed.org/news/12038154/kids-talking-ai-companion-chatbots-stanford-researchers-say-thats-bad-idea\">research from Stanford University\u003c/a>.\u003c/p>\u003c/div>",
"attributes": {
"named": {},
"numeric": []
}
},
{
"type": "component",
"content": "",
"name": "aside",
"attributes": {
"named": {
"postid": "news_12053799",
"hero": "https://cdn.kqed.org/wp-content/uploads/sites/10/2024/10/241009-OAKLAND-YOUTH-VOTE-MD-08-KQED-1020x680.jpg",
"label": ""
},
"numeric": []
}
},
{
"type": "contentString",
"content": "\u003cdiv class=\"post-body\">\u003cp>Steyer said OpenAI has partnered with Common Sense Media and has taken the issue more seriously than Meta AI or X’s Grok. But he still recommended that young people under 18 — “AI natives” — be restricted from using chatbots for companionship or therapy, suggesting that enhanced controls may not go far enough.\u003c/p>\n\u003cp>“You can’t just think that parental controls are a be-all end-all solution. They’re hard to use, very easy to bypass for young people, and they put the burden on parents when, honestly, it should be on the tech companies to prevent these kinds of tragic situations,” Steyer said. “It’s more like a bandaid when what we need is a long-term cure.”\u003c/p>\n\u003cp>In a blog post on Tuesday, the company shared plans to make the chatbot safer for young people to use in recognition of the fact that “people turn to it in the most difficult of moments.” The changes are set to roll out within the next month, OpenAI said.\u003c/p>\n\u003cp>OpenAI did not immediately respond to a request for comment. But the planned updates promise to link parents’ and teens’ accounts, reroute sensitive conversations with youth and alert parents “when the system detects their teen is in a moment of acute distress.”\u003c/p>\n\u003cp>If a user expresses suicidal ideation, ChatGPT is trained to direct people to seek professional help, OpenAI stated in a\u003ca href=\"https://openai.com/index/helping-people-when-they-need-it-most/\"> post\u003c/a> last week. 
ChatGPT refers people to 988, the suicide and crisis hotline.\u003c/p>\n\u003cp>The program does not escalate reports of self-harm to law enforcement, “given the uniquely private nature of ChatGPT interactions.” Licensed psychotherapists aren’t universally mandated to report self-harm either, but they must intervene if the client is at immediate risk.\u003c/p>\n\u003cp>Common Sense Media is supporting legislation in California that would establish limits protecting children from AI and social media abuse. AB 56 would implement \u003ca href=\"https://www.kqed.org/news/12017249/california-bill-would-put-tobacco-like-warnings-social-media-apps\">social media warning labels \u003c/a>that clearly state the risks to children, not unlike the labels pasted on tobacco products.\u003c/p>\n\u003cfigure id=\"attachment_12054564\" class=\"wp-caption aligncenter\" style=\"max-width: 2000px\">\u003cimg loading=\"lazy\" decoding=\"async\" class=\"wp-image-12054564 size-full\" src=\"https://cdn.kqed.org/wp-content/uploads/sites/10/2025/09/Sam-Altman_chatpgt.jpg\" alt=\"\" width=\"2000\" height=\"1333\" srcset=\"https://cdn.kqed.org/wp-content/uploads/sites/10/2025/09/Sam-Altman_chatpgt.jpg 2000w, https://cdn.kqed.org/wp-content/uploads/sites/10/2025/09/Sam-Altman_chatpgt-160x107.jpg 160w, https://cdn.kqed.org/wp-content/uploads/sites/10/2025/09/Sam-Altman_chatpgt-1536x1024.jpg 1536w\" sizes=\"auto, (max-width: 2000px) 100vw, 2000px\">\u003cfigcaption class=\"wp-caption-text\">Samuel Altman, CEO of OpenAI, testifies before the Senate Judiciary Subcommittee on Privacy, Technology, and the Law May 16, 2023 in Washington, DC. \u003ccite>(Win McNamee/Getty Images)\u003c/cite>\u003c/figcaption>\u003c/figure>\n\u003cp>The bill was proposed by Attorney General Rob Bonta and Orinda Assemblymember Rebecca Bauer-Kahan, and is headed to Gov. 
Gavin Newsom’s desk for signing.\u003c/p>\n\u003cp>A second bill, AB1064, would ban AI chatbots from manipulating children into forming emotional attachments or harvesting their personal and biometric data.\u003c/p>\n\u003cp>State Sen. Josh Becker (D-Menlo Park) also introduced an AI bill to protect vulnerable users from chatbots’ harmful effects: \u003ca href=\"https://leginfo.legislature.ca.gov/faces/billNavClient.xhtml?bill_id=202520260SB243&firstNav=tracking\">SB 243\u003c/a> would require companion chatbots to frequently remind users that it isn’t a person, in order to reduce the risk of emotional manipulation or \u003ca href=\"https://www.kqed.org/news/12038154/kids-talking-ai-companion-chatbots-stanford-researchers-say-thats-bad-idea\">unhealthy attachment\u003c/a>.\u003c/p>\n\u003cp>Whether Newsom will support the bills, along with a flurry of other proposed AI-safety laws in Sacramento, remains to be seen. The governor told reporters in early August that he is trying to establish a middle ground that provides public safety guardrails without suppressing business: “We’ve led in AI innovation, and we’ve led in AI regulation, but we’re trying to find a balance.”\u003c/p>\n\u003cp>As Newsom eyes higher office, and the California governor’s race heats up, there’s been a surge in AI lobbying and political action committees from the industry, with a \u003ca href=\"https://www.wsj.com/politics/silicon-valley-launches-pro-ai-pacs-to-defend-industry-in-midterm-elections-287905b3?gaa_at=eafs&gaa_n=ASWzDAjaxxFIzEaiCnLuxtt5FYul1NMFgXzDPGeVaH0VKZedvoSLexjk_j2Gr_Q0ZKQ%3D&gaa_ts=68b063e0&gaa_sig=V93Si4VVkqKsN1H-aEXHbbUoyVrGdS9GECVqYESgBE7WTq_dVBNLHw5VIyH41lRNW0pQQRB3N7d0mV9v_EaR4Q%3D%3D\">report \u003c/a>last week from the \u003cem>Wall Street Journal\u003c/em> that Silicon Valley plans to pour $100 million into a network of organizations opposing AI regulation ahead of next year’s midterm elections.\u003c/p>\n\u003cp>But it may take more to convince Californians: seven 
in 10 state residents favor “strong laws to make AI fair” and believe voluntary rules “simply don’t go far enough,” according to recent\u003ca href=\"https://url.us.m.mimecastprotect.com/s/o-vjCADmygFVBPLwtGfgtGPCKp?domain=email.commoncause.org\"> polling by Tech Equity\u003c/a>. Meanwhile, 59% think “AI will most likely benefit the wealthiest households and corporations, not working people and the middle class.”\u003c/p>\n\u003cp>\u003cem>KQED’s \u003ca href=\"https://www.kqed.org/author/rachael-myrow\">Rachael Myrow\u003c/a> contributed to this report. \u003c/em>\u003c/p>\n\u003cp>\u003c/p>\n\u003c/div>\u003c/p>",
"attributes": {
"named": {},
"numeric": []
}
}
],
"link": "/news/12054490/child-safety-groups-demand-mental-health-guardrails-after-california-teens-suicide-using-chatgpt",
"authors": [
"11925"
],
"categories": [
"news_31795",
"news_8",
"news_248"
],
"tags": [
"news_25184",
"news_32664",
"news_32668",
"news_27626",
"news_33542",
"news_689",
"news_38",
"news_34586",
"news_1631"
],
"featImg": "news_11998856",
"label": "news"
},
"news_12038874": {
"type": "posts",
"id": "news_12038874",
"meta": {
"index": "posts_1716263798",
"site": "news",
"id": "12038874",
"score": null,
"sort": [
1746649083000
]
},
"guestAuthors": [],
"slug": "how-to-talk-with-your-kids-about-ai-companion-bots",
"title": "How to Talk With Your Kids About AI Companion Bots",
"publishDate": 1746649083,
"format": "audio",
"headTitle": "How to Talk With Your Kids About AI Companion Bots | KQED",
"labelTerm": {
"site": "news"
},
"content": "\u003cp>For children and teenagers feeling anxious and alienated from their peers and adults, AI companion chatbots can \u003ca href=\"https://www.kqed.org/science/1996504/ai-replace-therapist-benefits-risks-unsettling-truths\">mimic the human compassion\u003c/a> they’re longing for.\u003c/p>\n\u003cp>They’re also available 24/7. However, for parents, the \u003ca href=\"https://www.kqed.org/news/12038154/kids-talking-ai-companion-chatbots-stanford-researchers-say-thats-bad-idea\">reported\u003c/a> \u003ca href=\"https://www.kqed.org/news/12034490/ai-companions-seductive-risk-teens-senators-want-more-guardrails\">dangers\u003c/a> of AI companion chatbots are alarming. At least \u003ca href=\"https://www.documentcloud.org/documents/25248089-megan-garcia-vs-character-ai/\">one parent has sued\u003c/a>, alleging \u003ca href=\"https://www.cnn.com/2024/10/30/tech/teen-suicide-character-ai-lawsuit\">her 14-year-old son was encouraged to take his own life\u003c/a> last year by a chatbot.\u003c/p>\n\u003cul>\n\u003cli>\u003cstrong>Jump straight to: \u003ca href=\"#ExpertadvicefortalkingtokidsaboutAIchatbots\">Expert advice for talking to kids about AI chatbots\u003c/a>\u003c/strong>\u003c/li>\n\u003c/ul>\n\u003cp>Regulatory solutions have yet to materialize, but the California legislature is considering \u003ca href=\"https://leginfo.legislature.ca.gov/faces/billNavClient.xhtml?bill_id=202520260SB243\">State Bill 243\u003c/a>, introduced by Sen. 
Steve Padilla, D-San Diego and soon to be heard in the Senate Appropriations committee, that would require chatbot operators to implement critical safeguards to protect users from the addictive, isolating and influential aspects of AI chatbots.\u003c/p>\n\u003cp>[ad fullwidth]\u003c/p>\n\u003cp>In a \u003ca href=\"https://ncdoj.gov/wp-content/uploads/2023/09/54-State-Ags-Urge-Study-of-AI-and-harmful-impacts-on-Children.pdf\">2023 letter\u003c/a>, 54 state attorneys general from both political parties urged Congress to act.\u003c/p>\n\u003cp>“We are engaged in a race against time to protect the children of our country from the dangers of AI,” they wrote. “The proverbial walls of the city have already been breached. Now is the time to act. ”\u003c/p>\n\u003cp>But if you’re a parent or caregiver whose child uses an AI chatbot — or wants to try one — how can you talk to them about the risks? That’s where human professionals can help. KQED reached out to experts, who’ve offered parents five key pieces of guidance.\u003c/p>\n\u003cul>\n\u003cli>\u003cstrong>Jump straight to: \u003ca href=\"#Wheretofindhumanledsupportforkids\">Where to find human-led support for kids\u003c/a>\u003c/strong>\u003c/li>\n\u003c/ul>\n\u003ch2>\u003ca id=\"ExpertadvicefortalkingtokidsaboutAIchatbots\">\u003c/a>Start with listening, rather than telling kids what to do\u003c/h2>\n\u003cp>It’s natural for kids to be curious, said Vicki Harrison, Program Director for Stanford University’s Center for Youth Mental Health and Wellbeing, who has two teenagers, ages 13 and 15.\u003c/p>\n\u003cfigure id=\"attachment_12038155\" class=\"wp-caption alignright\" style=\"max-width: 800px\">\u003cimg loading=\"lazy\" decoding=\"async\" class=\"size-medium wp-image-12038155\" src=\"https://cdn.kqed.org/wp-content/uploads/sites/10/2025/04/250429-Kids-and-Chatbots-01-KQED-800x645.jpg\" alt=\"\" width=\"800\" height=\"645\" 
srcset=\"https://cdn.kqed.org/wp-content/uploads/sites/10/2025/04/250429-Kids-and-Chatbots-01-KQED-800x645.jpg 800w, https://cdn.kqed.org/wp-content/uploads/sites/10/2025/04/250429-Kids-and-Chatbots-01-KQED-1020x823.jpg 1020w, https://cdn.kqed.org/wp-content/uploads/sites/10/2025/04/250429-Kids-and-Chatbots-01-KQED-160x129.jpg 160w, https://cdn.kqed.org/wp-content/uploads/sites/10/2025/04/250429-Kids-and-Chatbots-01-KQED.jpg 1028w\" sizes=\"auto, (max-width: 800px) 100vw, 800px\">\u003cfigcaption class=\"wp-caption-text\">Younger children may struggle with the distinction between fantasy and reality, tweens may be vulnerable to parasocial attachment, and teens may use social AI companions to avoid the challenges of building and sustaining real relationships. \u003ccite>(Courtesy of Common Sense Media and Stanford's Brainstorm Lab for Mental Health Innovation)\u003c/cite>\u003c/figcaption>\u003c/figure>\n\u003cp>Harrison sympathizes with parents whose first inclination is to panic and demand their child delete the app, but adds, don’t do it. “’Cause they’re not going to ever tell you anything again if you react that way.” She encourages parents to approach the conversation with curiosity instead, even though she acknowledges this is easier said than done.\u003c/p>\n\u003cp>“If we’re coming in fearful, they’re going to go into a reactive space, and they’re going to want to defend themselves, because they already feel insecure and self-conscious,” said \u003ca href=\"https://www.healingwithmindfulness.com\">Laurie Cousins\u003c/a>, a certified mindfulness teacher who works with children and families in Los Angeles. 
She also has two children, both now in college.\u003c/p>\n\u003cp>Both Harrison and Cousins recommend approaching the conversation about AI companions by bringing up \u003ca href=\"https://www.kqed.org/news/12038154/kids-talking-ai-companion-chatbots-stanford-researchers-say-thats-bad-idea\">recent news stories\u003c/a>, because it’s clear to the children that they aren’t personally in trouble.\u003c/p>\n\u003cp>Cousins suggests sharing what you learned and asking open-ended questions. These could include: ‘I was curious because I found out this. Do you know anything about that?’”\u003c/p>\n\u003ch2>Help your kids understand how AI companion chatbots take advantage of human wiring\u003c/h2>\n\u003cp>AI models don’t “understand” emotions the way humans do. They recognize and respond to textual cues learned from processing massive amounts of data gleaned from past conversations, interaction with therapists and therapy-focused websites, as well as random advice found online on platforms like Reddit.\u003c/p>\n\u003cp>“We all want, but especially the primitive parts of us, want to feel in control,” Cousins said about the way humans respond to companion chatbots, adding, “Our dopamine receptors are firing, and the oxytocin is firing in the way that it feels relational, it feels like a positive reinforcement.”[aside postID=news_12038154 hero='https://cdn.kqed.org/wp-content/uploads/sites/10/2025/04/StanfordStudyAIChatbotsKidsGetty-1020x680.jpg']Adults with some self-awareness of their own mental health struggles and personal history might be able to course correct when a companion chatbot fails to pick up on signs of depression, anxiety, ADHD and the like — and when it either affirms ideas ungrounded in reality, or encourages risky behavior in real life. 
Children often don’t have that self-awareness, and may not understand how some of the interactions they’re asking for may be damaging to their mental health.\u003c/p>\n\u003cp>“\u003ca href=\"https://www.stanfordchildrens.org/en/topic/default?id=understanding-the-teen-brain-1-3051\">Adolescents are in the emotional brain\u003c/a>, right?” Cousins said. “It’s not that they don’t have wisdom. They’re so flooded with emotions and maybe don’t have that risk-averse wisdom yet. Parents say, ‘What were you thinking?’ Well, they weren’t thinking.”\u003c/p>\n\u003cp>Additionally, knowing intellectually that software is designed to emotionally manipulate users into maintaining engagement is not the same thing as those users being able to control their emotional response to it, especially when they’re still developing their sense of identity, connection and belonging.\u003c/p>\n\u003cp>Cousins also suggests sharing with your kids — in an age-appropriate fashion — that companion chatbots are developed by companies with a profit-seeking motive. You could also explain to them that the companies behind the chatbots are likely sharing or selling all sorts of personal data, given that they are not bound by the same \u003ca href=\"https://www.cdc.gov/phlp/php/resources/health-insurance-portability-and-accountability-act-of-1996-hipaa.html\">privacy laws\u003c/a> as human therapists.\u003c/p>\n\u003ch2>Model the kind of healthy human relationships you want your child to emulate\u003c/h2>\n\u003cp>Whether it’s speaking kindly to a cashier at a store, joining sports leagues, or attending cultural events, Cousins argues parents need to show their children how to relate to other people. 
“That’s how we feel safer in community, in society, is when we feel we’re relating with one another.”\u003c/p>\n\u003cp>But that may require adults to address their own habits, given how digitally dependent we’ve all become.\u003c/p>\n\u003cp>“You and I know the ‘before times.’ We know that it’s possible to interact in the world without chatbots, without googling everything,” Harrison said.\u003c/p>\n\u003cp>Children, not so much. Increasingly, it’s difficult for all of us to avoid chatbots of one kind or another. “I’m slightly alarmed, being in Silicon Valley, just how prevalent AI has become. We’re unleashing it into the world regardless of the consequences.”\u003c/p>\n\u003cp>One idea is to encourage kids to make eye contact with others by modeling it yourself. “They need to see that someone is giving them eye contact, that’s meeting them,” Cousins said.\u003c/p>\n\u003ch2>Keep an eye on how much time your child spends staring at a screen\u003c/h2>\n\u003cp>Harrison urges parents to “scaffold” access to digital devices. For instance, rather than handing a smartphone to a child on their tenth or twelfth birthday, “maybe start with a not-so-smart phone, maybe only approve one app at a time.”\u003c/p>\n\u003cp>Harrison adds that parents can create family media plans and agreements and get their kids’ buy-in. “‘OK, here’s a new responsibility. Here’s my expectations of how you’re going to use it. 
If you want more privilege, you have to agree to use it in a certain kind of way.’”\u003c/p>\n\u003cfigure id=\"attachment_11789507\" class=\"wp-caption aligncenter\" style=\"max-width: 1920px\">\u003cimg loading=\"lazy\" decoding=\"async\" class=\"wp-image-11789507 size-full\" src=\"https://cdn.kqed.org/wp-content/uploads/sites/10/2019/12/cellphone-1920.jpg\" alt=\"\" width=\"1920\" height=\"1281\" srcset=\"https://cdn.kqed.org/wp-content/uploads/sites/10/2019/12/cellphone-1920.jpg 1920w, https://cdn.kqed.org/wp-content/uploads/sites/10/2019/12/cellphone-1920-160x107.jpg 160w, https://cdn.kqed.org/wp-content/uploads/sites/10/2019/12/cellphone-1920-800x534.jpg 800w, https://cdn.kqed.org/wp-content/uploads/sites/10/2019/12/cellphone-1920-1020x681.jpg 1020w, https://cdn.kqed.org/wp-content/uploads/sites/10/2019/12/cellphone-1920-1200x801.jpg 1200w\" sizes=\"auto, (max-width: 1920px) 100vw, 1920px\">\u003cfigcaption class=\"wp-caption-text\">Parents play an active role in managing screen time by setting clear boundaries — like approving apps individually — and involving kids in family media plans to build healthy habits early. \u003ccite>(Chandan Khanna/AFP via Getty Images)\u003c/cite>\u003c/figcaption>\u003c/figure>\n\u003cp>Cousins recommends periodically spot-checking the histories on children’s devices — at least, until they figure out how to delete their histories. But she also recommends parents monitor the amount of time children spend staring at a screen, regardless of whether they’re interacting with humans or not.\u003c/p>\n\u003cp>“I’ve worked with young people who are gaming all through the night and going to school with two hours of sleep. That’s a dependency, right? That’s an addiction. I’m saying to the parents, ‘Why do you have this in their room?’”\u003c/p>\n\u003cp>She argues responsible parenting means setting boundaries, likening digital dependency to an itch. 
Scratching the itch doesn’t cure the bite or the rash, but inflames both the itch and the urge to continue scratching it. “You got a big dopamine dump and now you’re chasing it, you know?”\u003c/p>\n\u003cp>In response to a recent report from Common Sense Media and Stanford researchers \u003ca href=\"https://www.kqed.org/news/12038154/kids-talking-ai-companion-chatbots-stanford-researchers-say-thats-bad-idea\">raising the alarm\u003c/a> about minors using AI companion chatbots, a CharacterAI spokesperson wrote KQED, “Banning a new technology for teenagers has never been an effective approach — not when it was tried with video games, the internet, or movies containing violence.”\u003c/p>\n\u003cp>That said, the company delivers a \u003ca href=\"https://character.ai/safety/teen-safety\">specialized version\u003c/a> of its large language model to 13–18 year olds. Among other things, this model includes a time-spent notification that notifies children if they have spent an hour on the platform.\u003c/p>\n\u003ch2>\u003ca id=\"Wheretofindhumanledsupportforkids\">\u003c/a>Seek out additional human resources\u003c/h2>\n\u003cul>\n\u003cli>\u003ca href=\"https://allcove.org\">\u003cstrong>allcove\u003c/strong>\u003c/a>\u003cstrong>\u003cbr>\n\u003c/strong>A free, welcoming space for youth ages 12–25, developed originally in Australia and now growing across the U.S., including locations in the Bay Area. Think of it as a “one-stop shop” offering mental health support, physical care, peer counseling, and help with substance abuse.\u003c/li>\n\u003cli>\u003cstrong>\u003ca href=\"https://www.commonsensemedia.org\">Common Sense Media\u003cbr>\n\u003c/a>\u003c/strong>The nonprofit provides independent, age-based reviews, ratings, and advice for parents, caregivers, and educators about media and technology for children and teens. 
It covers movies, TV shows, books, video games, apps, websites, and even TikTok and YouTube channels.\u003c/li>\n\u003cli>\u003ca href=\"https://badassgirls.me\">\u003cstrong>Bad Ass Girls\u003c/strong>\u003cstrong>\u003cbr>\n\u003c/strong>\u003c/a>An empowering mentorship and community program for preteen and teen girls. It’s built to help them explore confidence, connection, and emotional wellness through guided support and real talk.\u003c/li>\n\u003cli>\u003ca href=\"https://www.goodformedia.org\">\u003cstrong>#GoodforMEdia\u003c/strong>\u003cstrong>\u003cbr>\n\u003c/strong>\u003c/a>This is a youth-led program where teens help other teens make smart, thoughtful choices about their digital lives. It’s like having a mentor who actually understands how tricky social media can be — because they’re living it too.\u003c/li>\n\u003cli>\u003ca href=\"https://ggie.berkeley.edu\">\u003cstrong>Greater Good in Education\u003c/strong>\u003cstrong>\u003cbr>\n\u003c/strong>\u003c/a>A thoughtful collection of research-based articles, tips, and newsletters for parents and educators. Created by the Greater Good Science Center at UC Berkeley, it focuses on emotional well-being, mindfulness, and building empathy in young people.\u003c/li>\n\u003cli>\u003ca href=\"https://solunaapp.com\">\u003cstrong>Soluna \u003c/strong>\u003cstrong>\u003cbr>\n\u003c/strong>\u003c/a>Free for youth in California, this app offers mental health tools, mood tracking, and direct access to counselors. A great option for teens who might not feel ready to talk in person but still need support.\u003c/li>\n\u003cli>\u003ca href=\"https://www.teenline.org\">\u003cstrong>Teenline\u003c/strong>\u003c/a>\u003cstrong>\u003cbr>\n\u003c/strong>Teens everywhere can talk or text with trained teen volunteers. 
It’s anonymous and especially helpful for teens who feel more comfortable opening up to peers.\u003c/li>\n\u003cli>\u003ca href=\"https://www.childrenandscreens.org\">\u003cstrong>Children and screens\u003c/strong>\u003c/a>\u003cbr>\nA national nonprofit based in D.C. that offers an “Ask the Experts” series written with parents in mind — full of clear, science-backed tips.\u003c/li>\n\u003c/ul>\n\u003cp>[ad floatright]\u003c/p>\n",
"blocks": [],
"excerpt": "Most AI companion chatbot developers don’t condone minors using their software, but researchers have expressed alarm about the rising number of children and teenagers using the chatbots. ",
"status": "publish",
"parent": 0,
"modified": 1747075353,
"stats": {
"hasAudio": false,
"hasVideo": false,
"hasChartOrMap": false,
"iframeSrcs": [],
"hasGoogleForm": false,
"hasGallery": false,
"hasHearkenModule": false,
"hasPolis": false,
"paragraphCount": 30,
"wordCount": 1855
},
"headData": {
"title": "How to Talk With Your Kids About AI Companion Bots | KQED",
"description": "Most AI companion chatbot developers don’t condone minors using their software, but researchers have expressed alarm about the rising number of children and teenagers using the chatbots. ",
"ogTitle": "",
"ogDescription": "",
"ogImgId": "",
"twTitle": "",
"twDescription": "",
"twImgId": "",
"schema": {
"@context": "https://schema.org",
"@type": "NewsArticle",
"headline": "How to Talk With Your Kids About AI Companion Bots",
"datePublished": "2025-05-07T13:18:03-07:00",
"dateModified": "2025-05-12T11:42:33-07:00",
"image": "https://cdn.kqed.org/wp-content/uploads/2020/02/KQED-OG-Image@1x.png",
"isAccessibleForFree": "True",
"publisher": {
"@type": "NewsMediaOrganization",
"@id": "https://www.kqed.org/#organization",
"name": "KQED",
"logo": "https://cdn.kqed.org/wp-content/uploads/2020/02/KQED-OG-Image@1x.png",
"url": "https://www.kqed.org",
"sameAs": [
"https://www.facebook.com/KQED",
"https://twitter.com/KQED",
"https://www.instagram.com/kqed/",
"https://www.tiktok.com/@kqedofficial",
"https://www.linkedin.com/company/kqed",
"https://www.youtube.com/channel/UCeC0IOo7i1P_61zVUWbJ4nw"
]
}
}
},
"audioUrl": "https://traffic.omny.fm/d/clips/0af137ef-751e-4b19-a055-aaef00d2d578/ffca7e9f-6831-4[…]f-aaef00f5a073/48f0a687-c4f0-41d8-92dd-b2db010258f0/audio.mp3",
"sticky": false,
"nprStoryId": "kqed-12038874",
"templateType": "standard",
"featuredImageType": "standard",
"excludeFromSiteSearch": "Include",
"articleAge": "0",
"path": "/news/12038874/how-to-talk-with-your-kids-about-ai-companion-bots",
"audioTrackLength": null,
"parsedContent": [
{
"type": "contentString",
"content": "\u003cdiv class=\"post-body\">\u003cp>\u003cp>For children and teenagers feeling anxious and alienated from their peers and adults, AI companion chatbots can \u003ca href=\"https://www.kqed.org/science/1996504/ai-replace-therapist-benefits-risks-unsettling-truths\">mimic the human compassion\u003c/a> they’re longing for.\u003c/p>\n\u003cp>They’re also available 24/7. However, for parents, the \u003ca href=\"https://www.kqed.org/news/12038154/kids-talking-ai-companion-chatbots-stanford-researchers-say-thats-bad-idea\">reported\u003c/a> \u003ca href=\"https://www.kqed.org/news/12034490/ai-companions-seductive-risk-teens-senators-want-more-guardrails\">dangers\u003c/a> of AI companion chatbots are alarming. At least \u003ca href=\"https://www.documentcloud.org/documents/25248089-megan-garcia-vs-character-ai/\">one parent has sued\u003c/a>, alleging \u003ca href=\"https://www.cnn.com/2024/10/30/tech/teen-suicide-character-ai-lawsuit\">her 14-year-old son was encouraged to take his own life\u003c/a> last year by a chatbot.\u003c/p>\n\u003cul>\n\u003cli>\u003cstrong>Jump straight to: \u003ca href=\"#ExpertadvicefortalkingtokidsaboutAIchatbots\">Expert advice for talking to kids about AI chatbots\u003c/a>\u003c/strong>\u003c/li>\n\u003c/ul>\n\u003cp>Regulatory solutions have yet to materialize, but the California legislature is considering \u003ca href=\"https://leginfo.legislature.ca.gov/faces/billNavClient.xhtml?bill_id=202520260SB243\">State Bill 243\u003c/a>, introduced by Sen. Steve Padilla, D-San Diego and soon to be heard in the Senate Appropriations committee, that would require chatbot operators to implement critical safeguards to protect users from the addictive, isolating and influential aspects of AI chatbots.\u003c/p>\n\u003cp>\u003c/p>\u003c/div>",
"attributes": {
"named": {},
"numeric": []
}
},
{
"type": "component",
"content": "",
"name": "ad",
"attributes": {
"named": {
"label": "fullwidth"
},
"numeric": [
"fullwidth"
]
}
},
{
"type": "contentString",
"content": "\u003cdiv class=\"post-body\">\u003cp>\u003c/p>\n\u003cp>In a \u003ca href=\"https://ncdoj.gov/wp-content/uploads/2023/09/54-State-Ags-Urge-Study-of-AI-and-harmful-impacts-on-Children.pdf\">2023 letter\u003c/a>, 54 state attorneys general from both political parties urged Congress to act.\u003c/p>\n\u003cp>“We are engaged in a race against time to protect the children of our country from the dangers of AI,” they wrote. “The proverbial walls of the city have already been breached. Now is the time to act. ”\u003c/p>\n\u003cp>But if you’re a parent or caregiver whose child uses an AI chatbot — or wants to try one — how can you talk to them about the risks? That’s where human professionals can help. KQED reached out to experts, who’ve offered parents five key pieces of guidance.\u003c/p>\n\u003cul>\n\u003cli>\u003cstrong>Jump straight to: \u003ca href=\"#Wheretofindhumanledsupportforkids\">Where to find human-led support for kids\u003c/a>\u003c/strong>\u003c/li>\n\u003c/ul>\n\u003ch2>\u003ca id=\"ExpertadvicefortalkingtokidsaboutAIchatbots\">\u003c/a>Start with listening, rather than telling kids what to do\u003c/h2>\n\u003cp>It’s natural for kids to be curious, said Vicki Harrison, Program Director for Stanford University’s Center for Youth Mental Health and Wellbeing, who has two teenagers, ages 13 and 15.\u003c/p>\n\u003cfigure id=\"attachment_12038155\" class=\"wp-caption alignright\" style=\"max-width: 800px\">\u003cimg loading=\"lazy\" decoding=\"async\" class=\"size-medium wp-image-12038155\" src=\"https://cdn.kqed.org/wp-content/uploads/sites/10/2025/04/250429-Kids-and-Chatbots-01-KQED-800x645.jpg\" alt=\"\" width=\"800\" height=\"645\" srcset=\"https://cdn.kqed.org/wp-content/uploads/sites/10/2025/04/250429-Kids-and-Chatbots-01-KQED-800x645.jpg 800w, https://cdn.kqed.org/wp-content/uploads/sites/10/2025/04/250429-Kids-and-Chatbots-01-KQED-1020x823.jpg 1020w, 
https://cdn.kqed.org/wp-content/uploads/sites/10/2025/04/250429-Kids-and-Chatbots-01-KQED-160x129.jpg 160w, https://cdn.kqed.org/wp-content/uploads/sites/10/2025/04/250429-Kids-and-Chatbots-01-KQED.jpg 1028w\" sizes=\"auto, (max-width: 800px) 100vw, 800px\">\u003cfigcaption class=\"wp-caption-text\">Younger children may struggle with the distinction between fantasy and reality, tweens may be vulnerable to parasocial attachment, and teens may use social AI companions to avoid the challenges of building and sustaining real relationships. \u003ccite>(Courtesy of Common Sense Media and Stanford's Brainstorm Lab for Mental Health Innovation)\u003c/cite>\u003c/figcaption>\u003c/figure>\n\u003cp>Harrison sympathizes with parents whose first inclination is to panic and demand their child delete the app, but adds, don’t do it. “’Cause they’re not going to ever tell you anything again if you react that way.” She encourages parents to approach the conversation with curiosity instead, even though she acknowledges this is easier said than done.\u003c/p>\n\u003cp>“If we’re coming in fearful, they’re going to go into a reactive space, and they’re going to want to defend themselves, because they already feel insecure and self-conscious,” said \u003ca href=\"https://www.healingwithmindfulness.com\">Laurie Cousins\u003c/a>, a certified mindfulness teacher who works with children and families in Los Angeles. She also has two children, both now in college.\u003c/p>\n\u003cp>Both Harrison and Cousins recommend approaching the conversation about AI companions by bringing up \u003ca href=\"https://www.kqed.org/news/12038154/kids-talking-ai-companion-chatbots-stanford-researchers-say-thats-bad-idea\">recent news stories\u003c/a>, because it’s clear to the children that they aren’t personally in trouble.\u003c/p>\n\u003cp>Cousins suggests sharing what you learned and asking open-ended questions. These could include: ‘I was curious because I found out this. 
Do you know anything about that?’”\u003c/p>\n\u003ch2>Help your kids understand how AI companion chatbots take advantage of human wiring\u003c/h2>\n\u003cp>AI models don’t “understand” emotions the way humans do. They recognize and respond to textual cues learned from processing massive amounts of data gleaned from past conversations, interaction with therapists and therapy-focused websites, as well as random advice found online on platforms like Reddit.\u003c/p>\n\u003cp>“We all want, but especially the primitive parts of us, want to feel in control,” Cousins said about the way humans respond to companion chatbots, adding, “Our dopamine receptors are firing, and the oxytocin is firing in the way that it feels relational, it feels like a positive reinforcement.”\u003c/p>\u003c/div>",
"attributes": {
"named": {},
"numeric": []
}
},
{
"type": "component",
"content": "",
"name": "aside",
"attributes": {
"named": {
"postid": "news_12038154",
"hero": "https://cdn.kqed.org/wp-content/uploads/sites/10/2025/04/StanfordStudyAIChatbotsKidsGetty-1020x680.jpg",
"label": ""
},
"numeric": []
}
},
{
"type": "contentString",
"content": "\u003cdiv class=\"post-body\">\u003cp>Adults with some self-awareness of their own mental health struggles and personal history might be able to course correct when a companion chatbot fails to pick up on signs of depression, anxiety, ADHD and the like — and when it either affirms ideas ungrounded in reality, or encourages risky behavior in real life. Children often don’t have that self-awareness, and may not understand how some of the interactions they’re asking for may be damaging to their mental health.\u003c/p>\n\u003cp>“\u003ca href=\"https://www.stanfordchildrens.org/en/topic/default?id=understanding-the-teen-brain-1-3051\">Adolescents are in the emotional brain\u003c/a>, right?” Cousins said. “It’s not that they don’t have wisdom. They’re so flooded with emotions and maybe don’t have that risk-averse wisdom yet. Parents say, ‘What were you thinking?’ Well, they weren’t thinking.”\u003c/p>\n\u003cp>Additionally, knowing intellectually that software is designed to emotionally manipulate users into maintaining engagement is not the same thing as those users being able to control their emotional response to it, especially when they’re still developing their sense of identity, connection and belonging.\u003c/p>\n\u003cp>Cousins also suggests sharing with your kids — in an age-appropriate fashion — that companion chatbots are developed by companies with a profit-seeking motive. 
You could also explain to them that the companies behind the chatbots are likely sharing or selling all sorts of personal data, given that they are not bound by the same \u003ca href=\"https://www.cdc.gov/phlp/php/resources/health-insurance-portability-and-accountability-act-of-1996-hipaa.html\">privacy laws\u003c/a> as human therapists.\u003c/p>\n\u003ch2>Model the kind of healthy human relationships you want your child to emulate\u003c/h2>\n\u003cp>Whether it’s speaking kindly to a cashier at a store, joining sports leagues, or attending cultural events, Cousins argues parents need to show their children how to relate to other people. “That’s how we feel safer in community, in society, is when we feel we’re relating with one another.”\u003c/p>\n\u003cp>But that may require adults to address their own habits, given how digitally dependent we’ve all become.\u003c/p>\n\u003cp>“You and I know the ‘before times.’ We know that it’s possible to interact in the world without chatbots, without googling everything,” Harrison said.\u003c/p>\n\u003cp>Children, not so much. Increasingly, it’s difficult for all of us to avoid chatbots of one kind or another. “I’m slightly alarmed, being in Silicon Valley, just how prevalent AI has become. We’re unleashing it into the world regardless of the consequences.”\u003c/p>\n\u003cp>One idea is to encourage kids to make eye contact with others by modeling it yourself. “They need to see that someone is giving them eye contact, that’s meeting them,” Cousins said.\u003c/p>\n\u003ch2>Keep an eye on how much time your child spends staring at a screen\u003c/h2>\n\u003cp>Harrison urges parents to “scaffold” access to digital devices. For instance, rather than handing a smartphone to a child on their tenth or twelfth birthday, “maybe start with a not-so-smart phone, maybe only approve one app at a time.”\u003c/p>\n\u003cp>Harrison adds that parents can create family media plans and agreements and get their kids’ buy-in. 
“‘OK, here’s a new responsibility. Here’s my expectations of how you’re going to use it. If you want more privilege, you have to agree to use it in a certain kind of way.’”\u003c/p>\n\u003cfigure id=\"attachment_11789507\" class=\"wp-caption aligncenter\" style=\"max-width: 1920px\">\u003cimg loading=\"lazy\" decoding=\"async\" class=\"wp-image-11789507 size-full\" src=\"https://cdn.kqed.org/wp-content/uploads/sites/10/2019/12/cellphone-1920.jpg\" alt=\"\" width=\"1920\" height=\"1281\" srcset=\"https://cdn.kqed.org/wp-content/uploads/sites/10/2019/12/cellphone-1920.jpg 1920w, https://cdn.kqed.org/wp-content/uploads/sites/10/2019/12/cellphone-1920-160x107.jpg 160w, https://cdn.kqed.org/wp-content/uploads/sites/10/2019/12/cellphone-1920-800x534.jpg 800w, https://cdn.kqed.org/wp-content/uploads/sites/10/2019/12/cellphone-1920-1020x681.jpg 1020w, https://cdn.kqed.org/wp-content/uploads/sites/10/2019/12/cellphone-1920-1200x801.jpg 1200w\" sizes=\"auto, (max-width: 1920px) 100vw, 1920px\">\u003cfigcaption class=\"wp-caption-text\">Parents play an active role in managing screen time by setting clear boundaries — like approving apps individually — and involving kids in family media plans to build healthy habits early. \u003ccite>(Chandan Khanna/AFP via Getty Images)\u003c/cite>\u003c/figcaption>\u003c/figure>\n\u003cp>Cousins recommends periodically spot-checking the histories on children’s devices — at least, until they figure out how to delete their histories. But she also recommends parents monitor the amount of time children spend staring at a screen, regardless of whether they’re interacting with humans or not.\u003c/p>\n\u003cp>“I’ve worked with young people who are gaming all through the night and going to school with two hours of sleep. That’s a dependency, right? That’s an addiction. I’m saying to the parents, ‘Why do you have this in their room?’”\u003c/p>\n\u003cp>She argues responsible parenting means setting boundaries, likening digital dependency to an itch. 
Scratching the itch doesn’t cure the bite or the rash, but inflames both the itch and the urge to continue scratching it. “You got a big dopamine dump and now you’re chasing it, you know?”\u003c/p>\n\u003cp>In response to a recent report from Common Sense Media and Stanford researchers \u003ca href=\"https://www.kqed.org/news/12038154/kids-talking-ai-companion-chatbots-stanford-researchers-say-thats-bad-idea\">raising the alarm\u003c/a> about minors using AI companion chatbots, a CharacterAI spokesperson wrote KQED, “Banning a new technology for teenagers has never been an effective approach — not when it was tried with video games, the internet, or movies containing violence.”\u003c/p>\n\u003cp>That said, the company delivers a \u003ca href=\"https://character.ai/safety/teen-safety\">specialized version\u003c/a> of its large language model to 13–18 year olds. Among other things, this model includes a time-spent notification that notifies children if they have spent an hour on the platform.\u003c/p>\n\u003ch2>\u003ca id=\"Wheretofindhumanledsupportforkids\">\u003c/a>Seek out additional human resources\u003c/h2>\n\u003cul>\n\u003cli>\u003ca href=\"https://allcove.org\">\u003cstrong>allcove\u003c/strong>\u003c/a>\u003cstrong>\u003cbr>\n\u003c/strong>A free, welcoming space for youth ages 12–25, developed originally in Australia and now growing across the U.S., including locations in the Bay Area. Think of it as a “one-stop shop” offering mental health support, physical care, peer counseling, and help with substance abuse.\u003c/li>\n\u003cli>\u003cstrong>\u003ca href=\"https://www.commonsensemedia.org\">Common Sense Media\u003cbr>\n\u003c/a>\u003c/strong>The nonprofit provides independent, age-based reviews, ratings, and advice for parents, caregivers, and educators about media and technology for children and teens. 
It covers movies, TV shows, books, video games, apps, websites, and even TikTok and YouTube channels.\u003c/li>\n\u003cli>\u003ca href=\"https://badassgirls.me\">\u003cstrong>Bad Ass Girls\u003c/strong>\u003cstrong>\u003cbr>\n\u003c/strong>\u003c/a>An empowering mentorship and community program for preteen and teen girls. It’s built to help them explore confidence, connection, and emotional wellness through guided support and real talk.\u003c/li>\n\u003cli>\u003ca href=\"https://www.goodformedia.org\">\u003cstrong>#GoodforMEdia\u003c/strong>\u003cstrong>\u003cbr>\n\u003c/strong>\u003c/a>This is a youth-led program where teens help other teens make smart, thoughtful choices about their digital lives. It’s like having a mentor who actually understands how tricky social media can be — because they’re living it too.\u003c/li>\n\u003cli>\u003ca href=\"https://ggie.berkeley.edu\">\u003cstrong>Greater Good in Education\u003c/strong>\u003cstrong>\u003cbr>\n\u003c/strong>\u003c/a>A thoughtful collection of research-based articles, tips, and newsletters for parents and educators. Created by the Greater Good Science Center at UC Berkeley, it focuses on emotional well-being, mindfulness, and building empathy in young people.\u003c/li>\n\u003cli>\u003ca href=\"https://solunaapp.com\">\u003cstrong>Soluna \u003c/strong>\u003cstrong>\u003cbr>\n\u003c/strong>\u003c/a>Free for youth in California, this app offers mental health tools, mood tracking, and direct access to counselors. A great option for teens who might not feel ready to talk in person but still need support.\u003c/li>\n\u003cli>\u003ca href=\"https://www.teenline.org\">\u003cstrong>Teenline\u003c/strong>\u003c/a>\u003cstrong>\u003cbr>\n\u003c/strong>Teens everywhere can talk or text with trained teen volunteers. 
It’s anonymous and especially helpful for teens who feel more comfortable opening up to peers.\u003c/li>\n\u003cli>\u003ca href=\"https://www.childrenandscreens.org\">\u003cstrong>Children and screens\u003c/strong>\u003c/a>\u003cbr>\nA national nonprofit based in D.C. that offers an “Ask the Experts” series written with parents in mind — full of clear, science-backed tips.\u003c/li>\n\u003c/ul>\n\u003cp>\u003c/p>\u003c/div>",
"attributes": {
"named": {},
"numeric": []
}
},
{
"type": "component",
"content": "",
"name": "ad",
"attributes": {
"named": {
"label": "floatright"
},
"numeric": [
"floatright"
]
}
},
{
"type": "contentString",
"content": "\u003cdiv class=\"post-body\">\u003cp>\u003c/p>\n\u003c/div>\u003c/p>",
"attributes": {
"named": {},
"numeric": []
}
}
],
"link": "/news/12038874/how-to-talk-with-your-kids-about-ai-companion-bots",
"authors": [
"251"
],
"categories": [
"news_457",
"news_8",
"news_248"
],
"tags": [
"news_25184",
"news_32664",
"news_34755",
"news_32707",
"news_27626",
"news_18543",
"news_2109",
"news_33542",
"news_34586",
"news_178",
"news_1928",
"news_1631",
"news_21121",
"news_20385"
],
"featImg": "news_12039151",
"label": "news"
},
"news_12038154": {
"type": "posts",
"id": "news_12038154",
"meta": {
"index": "posts_1716263798",
"site": "news",
"id": "12038154",
"score": null,
"sort": [
1746040206000
]
},
"guestAuthors": [],
"slug": "kids-talking-ai-companion-chatbots-stanford-researchers-say-thats-bad-idea",
"title": "Kids Are Talking to AI Companion Chatbots. Stanford Researchers Say That’s a Bad Idea.",
"publishDate": 1746040206,
"format": "standard",
"headTitle": "Kids Are Talking to AI Companion Chatbots. Stanford Researchers Say That’s a Bad Idea. | KQED",
"labelTerm": {
"site": "news"
},
"content": "\u003cp>Imagine you’re a lonely 14-year-old. Maybe you want to talk about sex. Maybe you want to complain about school or ask about the voices in your head. Whatever the case, it’s appealing to imagine a context in which no adult shuts down your curiosity — or worse, makes you feel awkward.\u003c/p>\n\u003cp>All of this explains the popularity of online companions to children and teens. The chatbots mimic human social interaction in a more sophisticated fashion than digital assistants like \u003ca href=\"https://www.kqed.org/news/tag/openai\">OpenAI\u003c/a>’s ChatGPT or Amazon’s Alexa. Unlike those assistants, chatbots are much more likely to veer into socially controversial and even illegal territory.\u003c/p>\n\u003cp>Companion chatbot users can personalize their experience, like opting for characters from gaming, anime and pop culture. For instance, a 14-year-old boy from Florida \u003ca href=\"https://www.kqed.org/news/12034490/ai-companions-seductive-risk-teens-senators-want-more-guardrails\">took his own life last year\u003c/a> after growing emotionally close to a chatbot that mimicked the “Game of Thrones” character \u003ca href=\"https://www.youtube.com/watch?v=YbuBfizSnPk\">Daenerys Targaryen\u003c/a>.\u003c/p>\n\u003cp>[ad fullwidth]\u003c/p>\n\u003cp>The characters play along with the idea that they’re almost human, talking about eating meals or meeting up in real life, actively encouraging users to stay engaged.\u003c/p>\n\u003cp>What could go wrong with minors using this technology? 
Plenty, according to researchers from Stanford School of Medicine’s \u003ca href=\"https://www.stanfordbrainstorm.com\">Brainstorm Lab for Mental Health Innovation\u003c/a>, who collaborated with \u003ca href=\"https://www.commonsensemedia.org/ai-ratings/social-ai-companions?gate=riskassessment\">Common Sense Media\u003c/a> to set up test accounts for 14-year-olds, to evaluate how software from three different chatbot developers interacts with young people struggling to learn impulse control and social skills. They report that it took minimal prompting to get Character.AI, Nomi and Replika chatbots to engage in behavior harmful to human mental health.\u003c/p>\n\u003cfigure id=\"attachment_11146714\" class=\"wp-caption aligncenter\" style=\"max-width: 1920px\">\u003cimg loading=\"lazy\" decoding=\"async\" class=\"wp-image-11146714 size-full\" src=\"https://cdn.kqed.org/wp-content/uploads/sites/10/2016/10/11146713-thumb.jpg\" alt=\"\" width=\"1920\" height=\"1280\" srcset=\"https://cdn.kqed.org/wp-content/uploads/sites/10/2016/10/11146713-thumb.jpg 1920w, https://cdn.kqed.org/wp-content/uploads/sites/10/2016/10/11146713-thumb-160x107.jpg 160w, https://cdn.kqed.org/wp-content/uploads/sites/10/2016/10/11146713-thumb-800x533.jpg 800w, https://cdn.kqed.org/wp-content/uploads/sites/10/2016/10/11146713-thumb-1020x680.jpg 1020w, https://cdn.kqed.org/wp-content/uploads/sites/10/2016/10/11146713-thumb-1180x787.jpg 1180w, https://cdn.kqed.org/wp-content/uploads/sites/10/2016/10/11146713-thumb-960x640.jpg 960w, https://cdn.kqed.org/wp-content/uploads/sites/10/2016/10/11146713-thumb-240x160.jpg 240w, https://cdn.kqed.org/wp-content/uploads/sites/10/2016/10/11146713-thumb-375x250.jpg 375w, https://cdn.kqed.org/wp-content/uploads/sites/10/2016/10/11146713-thumb-520x347.jpg 520w\" sizes=\"auto, (max-width: 1920px) 100vw, 1920px\">\u003cfigcaption class=\"wp-caption-text\">Researchers from Stanford School of Medicine’s Brainstorm Lab for Mental Health Innovation, in collaboration 
with Common Sense Media, tested how chatbots interact with teens, finding that AI companions from Character.AI, Nomi and Replika quickly engaged in behavior potentially harmful to youth mental health — with major platforms like Snapchat and Meta also expanding their AI offerings for young users. \u003ccite>(Getty Images)\u003c/cite>\u003c/figcaption>\u003c/figure>\n\u003cp>“We did not have to do backflips to get the models to perform in the way that they did. The AI ‘friends’ will actively participate in sexual conversations and role play on any topic, with graphic details,” said Robbie Torney, Common Sense Media’s senior director for AI programs and project lead on what the nonprofit organization calls a risk assessment of the AI companion chatbot sector.\u003c/p>\n\u003cp>Character.AI, Nomi and Replika are not the only companies developing these products. Snapchat offers AI digital companions who are willing to \u003ca href=\"https://www.washingtonpost.com/technology/2023/03/14/snapchat-myai/\">talk to teens\u003c/a>. \u003ca href=\"https://www.wsj.com/tech/ai/meta-ai-chatbots-sex-a25311bf\">Meta\u003c/a> is racing to catch up across Instagram, Facebook and WhatsApp.\u003c/p>\n\u003cp>“There are countless other, similar social AI companions out there, with more being created every day,” the report states. “So, while we use examples from the specific products we tested to illustrate the potential harms of these tools, the research and evaluation we conducted for this risk assessment covers social AI companions more broadly.”[aside postID=science_1996504 hero='https://cdn.kqed.org/wp-content/uploads/sites/35/2025/03/IMG_0962-1020x765.jpg']The researchers argue that one of the most troubling features of companion chatbots is the way they are hardwired to be agreeable, engaging with a population of humans hardwired to be vulnerable. 
According to the \u003ca href=\"https://www.nami.org/about-mental-illness/mental-health-conditions/\">National Alliance on Mental Illness\u003c/a>, \u003ca href=\"https://www.ncbi.nlm.nih.gov/pubmed/15939837\">50\u003c/a> percent of all mental disorders, like cutting, suicidal ideation and schizophrenia, begin by age 14, 75 percent by age 24.\u003c/p>\n\u003cp>The chatbots “blur the line between fantasy and reality, at the exact time when adolescents are developing critical skills like emotional regulation, identity formation, and healthy relational attachment,” said Dr. Nina Vasan, a professor of psychiatry at Stanford University. “Instead of encouraging healthy real-world relationships, these AI friends pull users even deeper into artificial ones.”\u003c/p>\n\u003cp>Companion chatbots, the researchers warn, are not prepared to replace parents or professionals in identifying the first signs of something that requires speedy and effective treatment. “In our testing, when a user showed signs of serious mental illness and suggested a dangerous action, the AI did not intervene. In fact, it encouraged dangerous behavior,” Vasan said.\u003c/p>\n\u003cp>The AI companions reinforced users’ delusions, validating fears of being followed and offering advice on decoding imaginary messages, researchers said.\u003c/p>\n\u003cp>“AI companions don’t understand the real consequences of bad advice. 
They readily, in our testing, supported teens in making potentially harmful decisions like dropping out of school, ignoring parents, moving out without planning,” Torney added.\u003c/p>\n\u003cfigure id=\"attachment_12036125\" class=\"wp-caption aligncenter\" style=\"max-width: 2000px\">\u003cimg loading=\"lazy\" decoding=\"async\" class=\"size-full wp-image-12036125\" src=\"https://cdn.kqed.org/wp-content/uploads/sites/10/2025/04/MetaGetty2.jpg\" alt=\"\" width=\"2000\" height=\"1333\" srcset=\"https://cdn.kqed.org/wp-content/uploads/sites/10/2025/04/MetaGetty2.jpg 2000w, https://cdn.kqed.org/wp-content/uploads/sites/10/2025/04/MetaGetty2-800x533.jpg 800w, https://cdn.kqed.org/wp-content/uploads/sites/10/2025/04/MetaGetty2-1020x680.jpg 1020w, https://cdn.kqed.org/wp-content/uploads/sites/10/2025/04/MetaGetty2-160x107.jpg 160w, https://cdn.kqed.org/wp-content/uploads/sites/10/2025/04/MetaGetty2-1536x1024.jpg 1536w, https://cdn.kqed.org/wp-content/uploads/sites/10/2025/04/MetaGetty2-1920x1280.jpg 1920w\" sizes=\"auto, (max-width: 2000px) 100vw, 2000px\">\u003cfigcaption class=\"wp-caption-text\">The Meta, Facebook, Instagram, WhatsApp, Messenger and Threads logos are screened on a mobile phone on Jan. 25, 2025. \u003ccite>(Beata Zawrzel/NurPhoto via Getty Images)\u003c/cite>\u003c/figcaption>\u003c/figure>\n\u003cp>The timing of this release is no accident. Common Sense Media supports two state bills this legislative session that would ban or restrict interactions between AI companion bots and minors. 
The bills are among several state-level efforts by consumer advocates and lawmakers to regulate online kids’ safety after the federal Kids Online Safety Act died last fall.\u003c/p>\n\u003cp>\u003ca href=\"https://leginfo.legislature.ca.gov/faces/billCompareClient.xhtml?bill_id=202520260AB1064&showamends=false\">AB 1064\u003c/a> by Rebecca Bauer-Kahan, D-Orinda, would ban access to AI companions for Californians age 16 and under, as well as create a statewide standards board to assess and regulate AI tools used by children. The bill passed the Assembly Judiciary Committee on Tuesday. It’s headed next to the Assembly Appropriations Committee.\u003c/p>\n\u003cp>\u003ca href=\"https://leginfo.legislature.ca.gov/faces/billCompareClient.xhtml?bill_id=202520260SB243&showamends=false\">SB 243\u003c/a> by Sen. Steve Padilla, D-San Diego, would require the makers of AI companion bots to limit addictive design features, put protocols in place for handling discussions of suicide or self-harm and undergo regular compliance audits. The bill goes before the State Judiciary Committee on Wednesday.[aside postID=news_12034490 hero='https://cdn.kqed.org/wp-content/uploads/sites/10/2025/04/PadillaMentalHealthCaucus-1020x680.jpg']“We have been very transparent about the work we are doing to prioritize teen safety on our platform,” a spokesperson for Character.AI, which makes its products available to customers as young as 13 in the United States, said in a statement. “First and foremost, last year, we launched a separate version of our Large Language Model for under-18 users.\u003c/p>\n\u003cp>“That model is designed to further reduce the likelihood of users encountering, or prompting the model to return, sensitive or suggestive content,” the spokesperson continued. 
“We have updated prominent disclaimers to make it even clearer that the Character is not a real person and should not be relied on as fact or advice.”\u003c/p>\n\u003cp>Alex Cardinell, the founder and CEO of Nomi, said the company agrees that “children should not use Nomi or any other conversational AI app.”\u003c/p>\n\u003cp>“Nomi is an adult-only app, and it is strictly against our terms of service for anyone under 18 to use Nomi,” Cardinell said in an email. “Accordingly, we support stronger age gating so long as those mechanisms fully maintain user privacy and anonymity.”\u003c/p>\n\u003cp>A Replika spokesperson said the company’s tool “has always been intended solely for adults aged 18 and over.”\u003c/p>\n\u003cp>“We have strict protocols in place to prevent underage access. However, we are aware that some individuals attempt to bypass these safeguards by submitting false information,” the spokesperson wrote. “We take this issue seriously and are actively exploring new methods to strengthen our protections. This includes ongoing collaboration with regulators and academic institutions to better understand user behavior and continuously improve safety measures.”\u003c/p>\n\u003cp>The risk assessment authors did acknowledge that not all AI models are created equal in terms of functionality or guardrails.\u003c/p>\n\u003cp>Vasan said she interacted with ChatGPT last summer, prompting it to respond to signs of schizophrenia.\u003c/p>\n\u003cp>“It was actually very gentle and compassionate about explaining what psychosis was, how the user should try to get help contacting a mental health professional,” Vasan said. “I was very pleasantly surprised to see it was really what one would expect a doctor or someone trained in mental health to say.”\u003c/p>\n\u003cp>[ad floatright]\u003c/p>\n",
"blocks": [],
"excerpt": "A new report from Stanford researchers and Common Sense Media finds that companion chatbots should not be used by children and teens under 18. ",
"status": "publish",
"parent": 0,
"modified": 1752607026,
"stats": {
"hasAudio": false,
"hasVideo": false,
"hasChartOrMap": false,
"iframeSrcs": [],
"hasGoogleForm": false,
"hasGallery": false,
"hasHearkenModule": false,
"hasPolis": false,
"paragraphCount": 25,
"wordCount": 1350
},
"headData": {
"title": "Kids Are Talking to AI Companion Chatbots. Stanford Researchers Say That’s a Bad Idea. | KQED",
"description": "A new report from Stanford researchers and Common Sense Media finds that companion chatbots should not be used by children and teens under 18. ",
"ogTitle": "",
"ogDescription": "",
"ogImgId": "",
"twTitle": "",
"twDescription": "",
"twImgId": "",
"schema": {
"@context": "https://schema.org",
"@type": "NewsArticle",
"headline": "Kids Are Talking to AI Companion Chatbots. Stanford Researchers Say That’s a Bad Idea.",
"datePublished": "2025-04-30T12:10:06-07:00",
"dateModified": "2025-07-15T12:17:06-07:00",
"image": "https://cdn.kqed.org/wp-content/uploads/2020/02/KQED-OG-Image@1x.png",
"isAccessibleForFree": "True",
"publisher": {
"@type": "NewsMediaOrganization",
"@id": "https://www.kqed.org/#organization",
"name": "KQED",
"logo": "https://cdn.kqed.org/wp-content/uploads/2020/02/KQED-OG-Image@1x.png",
"url": "https://www.kqed.org",
"sameAs": [
"https://www.facebook.com/KQED",
"https://twitter.com/KQED",
"https://www.instagram.com/kqed/",
"https://www.tiktok.com/@kqedofficial",
"https://www.linkedin.com/company/kqed",
"https://www.youtube.com/channel/UCeC0IOo7i1P_61zVUWbJ4nw"
]
}
}
},
"primaryCategory": {
"termId": 8,
"slug": "news",
"name": "News"
},
"sticky": false,
"nprStoryId": "kqed-12038154",
"templateType": "standard",
"featuredImageType": "standard",
"excludeFromSiteSearch": "Include",
"articleAge": "0",
"path": "/news/12038154/kids-talking-ai-companion-chatbots-stanford-researchers-say-thats-bad-idea",
"audioTrackLength": null,
"parsedContent": [
{
"type": "contentString",
"content": "\u003cdiv class=\"post-body\">\u003cp>\u003cp>Imagine you’re a lonely 14-year-old. Maybe you want to talk about sex. Maybe you want to complain about school or ask about the voices in your head. Whatever the case, it’s appealing to imagine a context in which no adult shuts down your curiosity — or worse, makes you feel awkward.\u003c/p>\n\u003cp>All of this explains the popularity of online companions to children and teens. The chatbots mimic human social interaction in a more sophisticated fashion than digital assistants like \u003ca href=\"https://www.kqed.org/news/tag/openai\">OpenAI\u003c/a>’s ChatGPT or Amazon’s Alexa. Unlike those assistants, chatbots are much more likely to veer into socially controversial and even illegal territory.\u003c/p>\n\u003cp>Companion chatbot users can personalize their experience, like opting for characters from gaming, anime and pop culture. For instance, a 14-year-old boy from Florida \u003ca href=\"https://www.kqed.org/news/12034490/ai-companions-seductive-risk-teens-senators-want-more-guardrails\">took his own life last year\u003c/a> after growing emotionally close to a chatbot that mimicked the “Game of Thrones” character \u003ca href=\"https://www.youtube.com/watch?v=YbuBfizSnPk\">Daenerys Targaryen\u003c/a>.\u003c/p>\n\u003cp>\u003c/p>\u003c/div>",
"attributes": {
"named": {},
"numeric": []
}
},
{
"type": "component",
"content": "",
"name": "ad",
"attributes": {
"named": {
"label": "fullwidth"
},
"numeric": [
"fullwidth"
]
}
},
{
"type": "contentString",
"content": "\u003cdiv class=\"post-body\">\u003cp>\u003c/p>\n\u003cp>The characters play along with the idea that they’re almost human, talking about eating meals or meeting up in real life, actively encouraging users to stay engaged.\u003c/p>\n\u003cp>What could go wrong with minors using this technology? Plenty, according to researchers from Stanford School of Medicine’s \u003ca href=\"https://www.stanfordbrainstorm.com\">Brainstorm Lab for Mental Health Innovation\u003c/a>, who collaborated with \u003ca href=\"https://www.commonsensemedia.org/ai-ratings/social-ai-companions?gate=riskassessment\">Common Sense Media\u003c/a> to set up test accounts for 14-year-olds, to evaluate how software from three different chatbot developers interacts with young people struggling to learn impulse control and social skills. They report that it took minimal prompting to get Character.AI, Nomi and Replika chatbots to engage in behavior harmful to human mental health.\u003c/p>\n\u003cfigure id=\"attachment_11146714\" class=\"wp-caption aligncenter\" style=\"max-width: 1920px\">\u003cimg loading=\"lazy\" decoding=\"async\" class=\"wp-image-11146714 size-full\" src=\"https://cdn.kqed.org/wp-content/uploads/sites/10/2016/10/11146713-thumb.jpg\" alt=\"\" width=\"1920\" height=\"1280\" srcset=\"https://cdn.kqed.org/wp-content/uploads/sites/10/2016/10/11146713-thumb.jpg 1920w, https://cdn.kqed.org/wp-content/uploads/sites/10/2016/10/11146713-thumb-160x107.jpg 160w, https://cdn.kqed.org/wp-content/uploads/sites/10/2016/10/11146713-thumb-800x533.jpg 800w, https://cdn.kqed.org/wp-content/uploads/sites/10/2016/10/11146713-thumb-1020x680.jpg 1020w, https://cdn.kqed.org/wp-content/uploads/sites/10/2016/10/11146713-thumb-1180x787.jpg 1180w, https://cdn.kqed.org/wp-content/uploads/sites/10/2016/10/11146713-thumb-960x640.jpg 960w, https://cdn.kqed.org/wp-content/uploads/sites/10/2016/10/11146713-thumb-240x160.jpg 240w, 
https://cdn.kqed.org/wp-content/uploads/sites/10/2016/10/11146713-thumb-375x250.jpg 375w, https://cdn.kqed.org/wp-content/uploads/sites/10/2016/10/11146713-thumb-520x347.jpg 520w\" sizes=\"auto, (max-width: 1920px) 100vw, 1920px\">\u003cfigcaption class=\"wp-caption-text\">Researchers from Stanford School of Medicine’s Brainstorm Lab for Mental Health Innovation, in collaboration with Common Sense Media, tested how chatbots interact with teens, finding that AI companions from Character.AI, Nomi and Replika quickly engaged in behavior potentially harmful to youth mental health — with major platforms like Snapchat and Meta also expanding their AI offerings for young users. \u003ccite>(Getty Images)\u003c/cite>\u003c/figcaption>\u003c/figure>\n\u003cp>“We did not have to do backflips to get the models to perform in the way that they did. The AI ‘friends’ will actively participate in sexual conversations and role play on any topic, with graphic details,” said Robbie Torney, Common Sense Media’s senior director for AI programs and project lead on what the nonprofit organization calls a risk assessment of the AI companion chatbot sector.\u003c/p>\n\u003cp>Character.AI, Nomi and Replika are not the only companies developing these products. Snapchat offers AI digital companions who are willing to \u003ca href=\"https://www.washingtonpost.com/technology/2023/03/14/snapchat-myai/\">talk to teens\u003c/a>. \u003ca href=\"https://www.wsj.com/tech/ai/meta-ai-chatbots-sex-a25311bf\">Meta\u003c/a> is racing to catch up across Instagram, Facebook and WhatsApp.\u003c/p>\n\u003cp>“There are countless other, similar social AI companions out there, with more being created every day,” the report states. “So, while we use examples from the specific products we tested to illustrate the potential harms of these tools, the research and evaluation we conducted for this risk assessment covers social AI companions more broadly.”\u003c/p>\u003c/div>",
"attributes": {
"named": {},
"numeric": []
}
},
{
"type": "component",
"content": "",
"name": "aside",
"attributes": {
"named": {
"postid": "science_1996504",
"hero": "https://cdn.kqed.org/wp-content/uploads/sites/35/2025/03/IMG_0962-1020x765.jpg",
"label": ""
},
"numeric": []
}
},
{
"type": "contentString",
"content": "\u003cdiv class=\"post-body\">\u003cp>The researchers argue that one of the most troubling features of companion chatbots is the way they are hardwired to be agreeable, engaging with a population of humans hardwired to be vulnerable. According to the \u003ca href=\"https://www.nami.org/about-mental-illness/mental-health-conditions/\">National Alliance on Mental Illness\u003c/a>, \u003ca href=\"https://www.ncbi.nlm.nih.gov/pubmed/15939837\">50\u003c/a> percent of all mental disorders, like cutting, suicidal ideation and schizophrenia, begin by age 14, 75 percent by age 24.\u003c/p>\n\u003cp>The chatbots “blur the line between fantasy and reality, at the exact time when adolescents are developing critical skills like emotional regulation, identity formation, and healthy relational attachment,” said Dr. Nina Vasan, a professor of psychiatry at Stanford University. “Instead of encouraging healthy real-world relationships, these AI friends pull users even deeper into artificial ones.”\u003c/p>\n\u003cp>Companion chatbots, the researchers warn, are not prepared to replace parents or professionals in identifying the first signs of something that requires speedy and effective treatment. “In our testing, when a user showed signs of serious mental illness and suggested a dangerous action, the AI did not intervene. In fact, it encouraged dangerous behavior,” Vasan said.\u003c/p>\n\u003cp>The AI companions reinforced users’ delusions, validating fears of being followed and offering advice on decoding imaginary messages, researchers said.\u003c/p>\n\u003cp>“AI companions don’t understand the real consequences of bad advice. 
They readily, in our testing, supported teens in making potentially harmful decisions like dropping out of school, ignoring parents, moving out without planning,” Torney added.\u003c/p>\n\u003cfigure id=\"attachment_12036125\" class=\"wp-caption aligncenter\" style=\"max-width: 2000px\">\u003cimg loading=\"lazy\" decoding=\"async\" class=\"size-full wp-image-12036125\" src=\"https://cdn.kqed.org/wp-content/uploads/sites/10/2025/04/MetaGetty2.jpg\" alt=\"\" width=\"2000\" height=\"1333\" srcset=\"https://cdn.kqed.org/wp-content/uploads/sites/10/2025/04/MetaGetty2.jpg 2000w, https://cdn.kqed.org/wp-content/uploads/sites/10/2025/04/MetaGetty2-800x533.jpg 800w, https://cdn.kqed.org/wp-content/uploads/sites/10/2025/04/MetaGetty2-1020x680.jpg 1020w, https://cdn.kqed.org/wp-content/uploads/sites/10/2025/04/MetaGetty2-160x107.jpg 160w, https://cdn.kqed.org/wp-content/uploads/sites/10/2025/04/MetaGetty2-1536x1024.jpg 1536w, https://cdn.kqed.org/wp-content/uploads/sites/10/2025/04/MetaGetty2-1920x1280.jpg 1920w\" sizes=\"auto, (max-width: 2000px) 100vw, 2000px\">\u003cfigcaption class=\"wp-caption-text\">The Meta, Facebook, Instagram, WhatsApp, Messenger and Threads logos are screened on a mobile phone on Jan. 25, 2025. \u003ccite>(Beata Zawrzel/NurPhoto via Getty Images)\u003c/cite>\u003c/figcaption>\u003c/figure>\n\u003cp>The timing of this release is no accident. Common Sense Media supports two state bills this legislative session that would ban or restrict interactions between AI companion bots and minors. 
The bills are among several state-level efforts by consumer advocates and lawmakers to regulate online kids’ safety after the federal Kids Online Safety Act died last fall.\u003c/p>\n\u003cp>\u003ca href=\"https://leginfo.legislature.ca.gov/faces/billCompareClient.xhtml?bill_id=202520260AB1064&showamends=false\">AB 1064\u003c/a> by Rebecca Bauer-Kahan, D-Orinda, would ban access to AI companions for Californians age 16 and under, as well as create a statewide standards board to assess and regulate AI tools used by children. The bill passed the Assembly Judiciary Committee on Tuesday. It’s headed next to the Assembly Appropriations Committee.\u003c/p>\n\u003cp>\u003ca href=\"https://leginfo.legislature.ca.gov/faces/billCompareClient.xhtml?bill_id=202520260SB243&showamends=false\">SB 243\u003c/a> by Sen. Steve Padilla, D-San Diego, would require the makers of AI companion bots to limit addictive design features, put protocols in place for handling discussions of suicide or self-harm and undergo regular compliance audits. The bill goes before the State Judiciary Committee on Wednesday.\u003c/p>\u003c/div>",
"attributes": {
"named": {},
"numeric": []
}
},
{
"type": "component",
"content": "",
"name": "aside",
"attributes": {
"named": {
"postid": "news_12034490",
"hero": "https://cdn.kqed.org/wp-content/uploads/sites/10/2025/04/PadillaMentalHealthCaucus-1020x680.jpg",
"label": ""
},
"numeric": []
}
},
{
"type": "contentString",
"content": "\u003cdiv class=\"post-body\">\u003cp>“We have been very transparent about the work we are doing to prioritize teen safety on our platform,” a spokesperson for Character.AI, which makes its products available to customers as young as 13 in the United States, said in a statement. “First and foremost, last year, we launched a separate version of our Large Language Model for under-18 users.\u003c/p>\n\u003cp>“That model is designed to further reduce the likelihood of users encountering, or prompting the model to return, sensitive or suggestive content,” the spokesperson continued. “We have updated prominent disclaimers to make it even clearer that the Character is not a real person and should not be relied on as fact or advice.”\u003c/p>\n\u003cp>Alex Cardinell, the founder and CEO of Nomi, said the company agrees that “children should not use Nomi or any other conversational AI app.”\u003c/p>\n\u003cp>“Nomi is an adult-only app, and it is strictly against our terms of service for anyone under 18 to use Nomi,” Cardinell said in an email. “Accordingly, we support stronger age gating so long as those mechanisms fully maintain user privacy and anonymity.”\u003c/p>\n\u003cp>A Replika spokesperson said the company’s tool “has always been intended solely for adults aged 18 and over.”\u003c/p>\n\u003cp>“We have strict protocols in place to prevent underage access. However, we are aware that some individuals attempt to bypass these safeguards by submitting false information,” the spokesperson wrote. “We take this issue seriously and are actively exploring new methods to strengthen our protections. 
This includes ongoing collaboration with regulators and academic institutions to better understand user behavior and continuously improve safety measures.”\u003c/p>\n\u003cp>The risk assessment authors did acknowledge that not all AI models are created equal in terms of functionality or guardrails.\u003c/p>\n\u003cp>Vasan said she interacted with ChatGPT last summer, prompting it to respond to signs of schizophrenia.\u003c/p>\n\u003cp>“It was actually very gentle and compassionate about explaining what psychosis was, how the user should try to get help contacting a mental health professional,” Vasan said. “I was very pleasantly surprised to see it was really what one would expect a doctor or someone trained in mental health to say.”\u003c/p>\n\u003cp>\u003c/p>\u003c/div>",
"attributes": {
"named": {},
"numeric": []
}
},
{
"type": "component",
"content": "",
"name": "ad",
"attributes": {
"named": {
"label": "floatright"
},
"numeric": [
"floatright"
]
}
},
{
"type": "contentString",
"content": "\u003cdiv class=\"post-body\">\u003cp>\u003c/p>\n\u003c/div>\u003c/p>",
"attributes": {
"named": {},
"numeric": []
}
}
],
"link": "/news/12038154/kids-talking-ai-companion-chatbots-stanford-researchers-say-thats-bad-idea",
"authors": [
"251"
],
"categories": [
"news_31795",
"news_457",
"news_28250",
"news_8",
"news_248"
],
"tags": [
"news_25184",
"news_32664",
"news_34755",
"news_18538",
"news_27626",
"news_18543",
"news_2109",
"news_33542",
"news_19960",
"news_34586",
"news_178",
"news_1928",
"news_1631",
"news_21121",
"news_20385"
],
"featImg": "news_12038161",
"label": "news"
},
"news_12038029": {
"type": "posts",
"id": "news_12038029",
"meta": {
"index": "posts_1716263798",
"site": "news",
"id": "12038029",
"score": null,
"sort": [
1745968407000
]
},
"guestAuthors": [],
"slug": "a-kinder-gentler-doge-newsom-says-ai-deals-will-make-california-more-efficient",
"title": "A Kinder, Gentler DOGE? Newsom Says AI Deals Will Make California More Efficient",
"publishDate": 1745968407,
"format": "standard",
"headTitle": "A Kinder, Gentler DOGE? Newsom Says AI Deals Will Make California More Efficient | KQED",
"labelTerm": {
"site": "news"
},
"content": "\u003cp>Speaking in Los Angeles on Tuesday, \u003ca href=\"https://www.kqed.org/news/tag/gavin-newsom\">Gov. Gavin Newsom\u003c/a> said a father came up to him during halftime at their children’s basketball game over the weekend and asked him, “Wouldn’t it be good if the blue states would come together and do a DOGE?”\u003c/p>\n\u003cp>The father was referring to the Trump administration’s \u003ca href=\"https://www.kqed.org/news/12038026/san-francisco-santa-clara-counties-sue-trump-over-mass-doge-led-firings\">Department of Government Efficiency\u003c/a>, effectively led by tech billionaire Elon Musk. Newsom said that as he tried to explain that DOGE is not the only example of how to streamline government, nor the first, the man’s eyes glazed over.\u003c/p>\n\u003cp>“He wants to see the chainsaw,” Newsom said.\u003c/p>\n\u003cp>[ad fullwidth]\u003c/p>\n\u003cp>In Tuesday’s news conference at the L.A. offices of consulting company Accenture, Newsom announced that the state has entered into three new agreements meant to boost the efficiency of government services — not by mass layoffs but by making use of generative artificial intelligence.\u003c/p>\n\u003cp>“We’re DOGE, but better … because we’ve been doing it \u003cem>with \u003c/em>people, not \u003cem>to \u003c/em>people,” Newsom said.\u003c/p>\n\u003cp>Two of the agreements aim to improve the Department of Transportation’s ability to identify and prevent traffic bottlenecks, accidents and near-misses: one with Accenture to use Azure OpenAI, developed by Microsoft, and the other with Deloitte Consulting to use Google’s Gemini GenAI.\u003c/p>\n\u003cp>In the third partnership, Department of Tax and Fee Administration officials will use generative AI to build on a pilot project carried out over the last 10 months by SymSoft Solutions, using Anthropic’s Claude, to reduce the time it takes to handle an average customer inquiry.\u003c/p>\n\u003cp>Newsom said he’s rolling out generative AI projects with 
multiple corporate partners across eight state departments, at a scale he said has yet to be seen anywhere else in the country. The latest projects build on Newsom’s \u003ca href=\"https://www.gov.ca.gov/2024/09/05/governor-newsom-seeks-to-harness-the-power-of-genai-to-address-homelessness-other-challenges/\">2023 executive order\u003c/a> directing state agencies to use generative AI technologies to improve state services and help solve intractable issues.[aside postID=news_12037518 hero='https://cdn.kqed.org/wp-content/uploads/sites/10/2025/04/GettyImages-2159615518-1020x680.jpg']California Transportation Secretary Toks Omishakin said state agencies are committed to delivering on the expectation of better customer service, pointing to the Department of Motor Vehicles as an example.\u003c/p>\n\u003cp>“Six years ago, there were only a dozen transactions that you could do online. Only 12. Today, there are 50,” he said.\u003c/p>\n\u003cp>California government’s sclerotic relationship with IT has been the subject of many press inquiries and government reports over the decades. But ethics watchdogs warn generative AI is not necessarily a quick fix, nor an inexpensive one, given that corporate consultants are handling the rollout.\u003c/p>\n\u003cp>“So much depends on which contexts and how genAI will be used,” wrote Irina Raicu, director of the Internet Ethics program at the Markkula Center for Applied Ethics at Santa Clara University. She argued the state government risks adding to the “pervasive AI hype that’s now endangering, among other things, the technology itself.”\u003c/p>\n\u003cp>Raicu suggested every state agency should ask itself a set of tough questions ahead of every pilot program: “Why should we integrate generative AI into our processes? Is this the type of AI best suited to the problems that we are hoping to address? Do we have evidence that the AI tool we hope to use works as intended? If it does, is this the most cost-effective way to respond? 
Have we considered the risks that come with generative AI (and associated issues like its environmental impact), not just the benefits that it would bring?”\u003c/p>\n\u003cp>While Newsom was talking, the Legislative Analyst’s Office published a \u003ca href=\"https://lao.ca.gov/Publications/Report/5034\">preliminary assessment\u003c/a> of his generative AI initiative to overhaul the state’s IT project approval process. Upshot: “premature,” without enough information to assess whether the new process would be an improvement on the old.\u003c/p>\n\u003cp>\u003c/p>\n",
"blocks": [],
"excerpt": "Gov. Gavin Newsom touted his efforts to utilize generative AI to improve state government services as a more humane alternative to DOGE at the federal level. ",
"status": "publish",
"parent": 0,
"modified": 1745969771,
"stats": {
"hasAudio": false,
"hasVideo": false,
"hasChartOrMap": false,
"iframeSrcs": [],
"hasGoogleForm": false,
"hasGallery": false,
"hasHearkenModule": false,
"hasPolis": false,
"paragraphCount": 15,
"wordCount": 663
},
"headData": {
"title": "A Kinder, Gentler DOGE? Newsom Says AI Deals Will Make California More Efficient | KQED",
"description": "Gov. Gavin Newsom touted his efforts to utilize generative AI to improve state government services as a more humane alternative to DOGE at the federal level. ",
"ogTitle": "",
"ogDescription": "",
"ogImgId": "",
"twTitle": "",
"twDescription": "",
"twImgId": "",
"schema": {
"@context": "https://schema.org",
"@type": "NewsArticle",
"headline": "A Kinder, Gentler DOGE? Newsom Says AI Deals Will Make California More Efficient",
"datePublished": "2025-04-29T16:13:27-07:00",
"dateModified": "2025-04-29T16:36:11-07:00",
"image": "https://cdn.kqed.org/wp-content/uploads/2020/02/KQED-OG-Image@1x.png",
"isAccessibleForFree": "True",
"publisher": {
"@type": "NewsMediaOrganization",
"@id": "https://www.kqed.org/#organization",
"name": "KQED",
"logo": "https://cdn.kqed.org/wp-content/uploads/2020/02/KQED-OG-Image@1x.png",
"url": "https://www.kqed.org",
"sameAs": [
"https://www.facebook.com/KQED",
"https://twitter.com/KQED",
"https://www.instagram.com/kqed/",
"https://www.tiktok.com/@kqedofficial",
"https://www.linkedin.com/company/kqed",
"https://www.youtube.com/channel/UCeC0IOo7i1P_61zVUWbJ4nw"
]
}
}
},
"sticky": false,
"nprStoryId": "kqed-12038029",
"templateType": "standard",
"featuredImageType": "standard",
"excludeFromSiteSearch": "Include",
"articleAge": "0",
"path": "/news/12038029/a-kinder-gentler-doge-newsom-says-ai-deals-will-make-california-more-efficient",
"audioTrackLength": null,
"parsedContent": [
{
"type": "contentString",
"content": "\u003cdiv class=\"post-body\">\u003cp>\u003cp>Speaking in Los Angeles on Tuesday, \u003ca href=\"https://www.kqed.org/news/tag/gavin-newsom\">Gov. Gavin Newsom\u003c/a> said a father came up to him during halftime at their children’s basketball game over the weekend and asked him, “Wouldn’t it be good if the blue states would come together and do a DOGE?”\u003c/p>\n\u003cp>The father was referring to the Trump administration’s \u003ca href=\"https://www.kqed.org/news/12038026/san-francisco-santa-clara-counties-sue-trump-over-mass-doge-led-firings\">Department of Government Efficiency\u003c/a>, effectively led by tech billionaire Elon Musk. Newsom said that as he tried to explain that DOGE is not the only example of how to streamline government, nor the first, the man’s eyes glazed over.\u003c/p>\n\u003cp>“He wants to see the chainsaw,” Newsom said.\u003c/p>\n\u003cp>\u003c/p>\u003c/div>",
"attributes": {
"named": {},
"numeric": []
}
},
{
"type": "component",
"content": "",
"name": "ad",
"attributes": {
"named": {
"label": "fullwidth"
},
"numeric": [
"fullwidth"
]
}
},
{
"type": "contentString",
"content": "\u003cdiv class=\"post-body\">\u003cp>\u003c/p>\n\u003cp>In Tuesday’s news conference at the L.A. offices of consulting company Accenture, Newsom announced that the state has entered into three new agreements meant to boost the efficiency of government services — not by mass layoffs but by making use of generative artificial intelligence.\u003c/p>\n\u003cp>“We’re DOGE, but better … because we’ve been doing it \u003cem>with \u003c/em>people, not \u003cem>to \u003c/em>people,” Newsom said.\u003c/p>\n\u003cp>Two of the agreements aim to improve the Department of Transportation’s ability to identify and prevent traffic bottlenecks, accidents and near-misses: one with Accenture to use Azure OpenAI, developed by Microsoft, and the other with Deloitte Consulting to use Google’s Gemini GenAI.\u003c/p>\n\u003cp>In the third partnership, Department of Tax and Fee Administration officials will use generative AI to build on a pilot project carried out over the last 10 months by SymSoft Solutions, using Anthropic’s Claude, to reduce the time it takes to handle an average customer inquiry.\u003c/p>\n\u003cp>Newsom said he’s rolling out generative AI projects with multiple corporate partners across eight state departments, at a scale he said has yet to be seen anywhere else in the country. The latest projects build on Newsom’s \u003ca href=\"https://www.gov.ca.gov/2024/09/05/governor-newsom-seeks-to-harness-the-power-of-genai-to-address-homelessness-other-challenges/\">2023 executive order\u003c/a> directing state agencies to use generative AI technologies to improve state services and help solve intractable issues.\u003c/p>\u003c/div>",
"attributes": {
"named": {},
"numeric": []
}
},
{
"type": "component",
"content": "",
"name": "aside",
"attributes": {
"named": {
"postid": "news_12037518",
"hero": "https://cdn.kqed.org/wp-content/uploads/sites/10/2025/04/GettyImages-2159615518-1020x680.jpg",
"label": ""
},
"numeric": []
}
},
{
"type": "contentString",
"content": "\u003cdiv class=\"post-body\">\u003cp>California Transportation Secretary Toks Omishakin said state agencies are committed to delivering on the expectation of better customer service, pointing to the Department of Motor Vehicles as an example.\u003c/p>\n\u003cp>“Six years ago, there were only a dozen transactions that you could do online. Only 12. Today, there are 50,” he said.\u003c/p>\n\u003cp>California government’s sclerotic relationship with IT has been the subject of many press inquiries and government reports over the decades. But ethics watchdogs warn generative AI is not necessarily a quick fix, nor an inexpensive one, given that corporate consultants are handling the rollout.\u003c/p>\n\u003cp>“So much depends on which contexts and how genAI will be used,” wrote Irina Raicu, director of the Internet Ethics program at the Markkula Center for Applied Ethics at Santa Clara University. She argued the state government risks adding to the “pervasive AI hype that’s now endangering, among other things, the technology itself.”\u003c/p>\n\u003cp>Raicu suggested every state agency should ask itself a set of tough questions ahead of every pilot program: “Why should we integrate generative AI into our processes? Is this the type of AI best suited to the problems that we are hoping to address? Do we have evidence that the AI tool we hope to use works as intended? If it does, is this the most cost-effective way to respond? Have we considered the risks that come with generative AI (and associated issues like its environmental impact), not just the benefits that it would bring?”\u003c/p>\n\u003cp>While Newsom was talking, the Legislative Analyst’s Office published a \u003ca href=\"https://lao.ca.gov/Publications/Report/5034\">preliminary assessment\u003c/a> of his generative AI initiative to overhaul the state’s IT project approval process. 
Upshot: “premature,” without enough information to assess whether the new process would be an improvement on the old.\u003c/p>\n\u003cp>\u003c/p>\n\u003c/div>\u003c/p>",
"attributes": {
"named": {},
"numeric": []
}
}
],
"link": "/news/12038029/a-kinder-gentler-doge-newsom-says-ai-deals-will-make-california-more-efficient",
"authors": [
"251"
],
"categories": [
"news_31795",
"news_8",
"news_13",
"news_248"
],
"tags": [
"news_25184",
"news_32664",
"news_34755",
"news_18538",
"news_34377",
"news_16",
"news_33542",
"news_17968",
"news_34586"
],
"featImg": "news_12030059",
"label": "news"
},
"news_12037518": {
"type": "posts",
"id": "news_12037518",
"meta": {
"index": "posts_1716263798",
"site": "news",
"id": "12037518",
"score": null,
"sort": [
1745593252000
]
},
"guestAuthors": [],
"slug": "newsom-signals-to-california-privacy-watchdog-that-hes-on-big-techs-side-on-ai-regulation",
"title": "Newsom Signals to California Privacy Watchdog That He’s on Big Tech’s Side on AI Regulation",
"publishDate": 1745593252,
"format": "standard",
"headTitle": "Newsom Signals to California Privacy Watchdog That He’s on Big Tech’s Side on AI Regulation | KQED",
"labelTerm": {
"site": "news"
},
"content": "\u003cp>Gov. Gavin Newsom is urging the California Privacy Protection Agency to regulate the state’s AI sector with a light touch.\u003c/p>\n\u003cp>Newsom, in an unusual letter to the California Privacy Protection Agency’s board, urged the regulators to dial back \u003ca href=\"https://cppa.ca.gov/regulations/pdf/ccpa_updates_cyber_risk_admt_ins_text.pdf\" target=\"_blank\" rel=\"noopener\">proposed regulations (PDF)\u003c/a> on automated decision-making technology.\u003c/p>\n\u003cp>“As my office has relayed to Agency staff over the last year, enacting these regulations could create significant unintended consequences and impose substantial costs,” the governor wrote in a letter \u003ca href=\"https://cdn.kqed.org/wp-content/uploads/sites/10/2025/04/CPPA-Letter.pdf\" target=\"_blank\" rel=\"noopener\">obtained by KQED (PDF)\u003c/a>.\u003c/p>\n\u003cp>[ad fullwidth]\u003c/p>\n\u003cp>He went on to write, “The Agency can fulfill its obligations to issue the regulations called for by \u003ca href=\"https://www.kqed.org/news/11844163/proposition-24-californians-say-yes-to-expanding-on-nations-toughest-data-privacy-law\">Proposition 24\u003c/a> without venturing into areas beyond its mandate. Thank you for working in partnership with my Administration and the Legislature to balance privacy protection with clear and implementable guidelines that allow regulated entities to innovate responsibly, creating a fairer and more trustworthy digital environment for California consumers.”\u003c/p>\n\u003cp class=\"x_MsoNormal\">“The CPPA Board and staff continue to refine the draft regulations and will further discuss them at the May 1st board meeting,” Tom Kemp, CPPA’s executive director, wrote in response. 
He added, “We are grateful for the Governor’s Office’s continued engagement around this important issue.”\u003c/p>\n\u003cp>“It’s unfortunate to see a lot of the industry talking points coming out of a letter from the governor,” said Jake Snow, a technology and civil liberties attorney at the ACLU of Northern California. “The agency has a really broad authority to put in place new rules and the regulations that they’ve written are simple rules that encourage transparency and trust in AI for people in California.”\u003c/p>\n\u003cp>There’s really nothing like the California Privacy Protection Agency anywhere in the United States. Created in 2020, the agency is just beginning to find its voice, and that means “increasingly, it is attracting lobbying attention from industry,” said Jonathan Mehta Stein, chair of the \u003ca href=\"https://cited.tech\">California Initiative for Technology and Democracy\u003c/a>.\u003c/p>\n\u003cp>[aside postID=news_12036593 hero='https://cdn.kqed.org/wp-content/uploads/sites/10/2025/04/GoogleGetty-1020x750.jpg']\u003c/p>\n\u003cp>The \u003ca href=\"https://cppa.ca.gov/regulations/pdf/ccpa_updates_cyber_risk_admt_ins_text.pdf\" target=\"_blank\" rel=\"noopener\">draft regulations\u003c/a> would require businesses to assess and report privacy risks, perform annual cybersecurity audits, and give consumers more control over how automated systems (like AI and profiling tools) use their personal data. Public comment for the draft regulations closed on February 19. 
The board discussed those comments at the April board meeting, and they’ll discuss again on May 1.\u003c/p>\n\u003cp>The broad scope of the conversation brought out a fulsome array of interested parties, including not just the governor, but industry lobbyists and consumer advocates as well.\u003c/p>\n\u003cp>“AI, social media and data privacy are fundamentally intertwined, and if we are going to protect consumers and our democracy, from these combined, interwoven threats, you have to be talking about all of them all at once,” Stein said. “Right now, social media and AI are almost totally unregulated.\u003c/p>\n\u003cp>“California has made some good starts on data privacy in some recent bills in recent years, but there is almost no industry I can think of that has an impact on our lives so enormous, and sits under a regulatory regime so light and so minimal.”\u003c/p>\n\u003cp>Newsom has a reputation in Sacramento for lending a friendly ear to industry concerns. He has killed a couple of the most controversial AI bills, like one that would have required large-scale AI developers to submit their \u003ca href=\"https://www.kqed.org/news/12007087/california-blinks-governor-newsom-vetoes-ai-bill-aimed-at-catastrophic-harms\">safety plans\u003c/a> to the state attorney general, and two that would have forced tech platforms to \u003ca href=\"https://www.kqed.org/news/12001227/newsom-strikes-deal-with-google-and-openai-to-support-california-newsrooms\">share ad revenues with news\u003c/a> organizations.\u003c/p>\n\u003cp>However, Newsom has also signed many bills that consumer advocates like, addressing everything from online privacy to critical infrastructure.\u003c/p>\n\u003cp>At a board meeting \u003ca href=\"https://www.youtube.com/watch?v=qvRonzmjUgY\">three weeks ago\u003c/a>, CPPA Board member Alastair Mactaggart worried that moving forward aggressively could trigger industry lawsuits designed to bury the agency’s small staff in paperwork.\u003c/p>\n\u003cp>Or 
Silicon Valley lobbyists might appeal to President Donald Trump and the Republican-controlled Congress to preempt California’s privacy protections with weaker federal rules. However, it’s not clear how friendly that audience would be, given the federal government’s \u003ca href=\"https://www.kqed.org/author/rachael-myrow\">continued aggressive legal assaults\u003c/a> against Google and Meta.\u003c/p>\n\u003cp>“Rules around artificial intelligence are really a part of privacy law, because they govern the control that people should have over the use of information about them, and the use of that information that affects people’s lives,” said Snow, urging the board to move forward on “common sense restrictions on this technology.” What defines “common sense,” however, is a matter of continued debate.\u003c/p>\n\u003cp>The CPPA board has a November deadline to finalize the rules.\u003c/p>\n\u003cp>\u003c/p>\n",
"blocks": [],
"excerpt": "In an unusual letter to the California Privacy Protection Agency’s board, Gov. Gavin Newsom is urging the regulators to dial back proposed regulations on automated decision-making technology.",
"status": "publish",
"parent": 0,
"modified": 1745605081,
"stats": {
"hasAudio": false,
"hasVideo": false,
"hasChartOrMap": false,
"iframeSrcs": [],
"hasGoogleForm": false,
"hasGallery": false,
"hasHearkenModule": false,
"hasPolis": false,
"paragraphCount": 19,
"wordCount": 794
},
"headData": {
"title": "Newsom Signals to California Privacy Watchdog That He’s on Big Tech’s Side on AI Regulation | KQED",
"description": "In an unusual letter to the California Privacy Protection Agency’s board, Gov. Gavin Newsom is urging the regulators to dial back proposed regulations on automated decision-making technology.",
"ogTitle": "",
"ogDescription": "",
"ogImgId": "",
"twTitle": "",
"twDescription": "",
"twImgId": "",
"schema": {
"@context": "https://schema.org",
"@type": "NewsArticle",
"headline": "Newsom Signals to California Privacy Watchdog That He’s on Big Tech’s Side on AI Regulation",
"datePublished": "2025-04-25T08:00:52-07:00",
"dateModified": "2025-04-25T11:18:01-07:00",
"image": "https://cdn.kqed.org/wp-content/uploads/2020/02/KQED-OG-Image@1x.png",
"isAccessibleForFree": "True",
"publisher": {
"@type": "NewsMediaOrganization",
"@id": "https://www.kqed.org/#organization",
"name": "KQED",
"logo": "https://cdn.kqed.org/wp-content/uploads/2020/02/KQED-OG-Image@1x.png",
"url": "https://www.kqed.org",
"sameAs": [
"https://www.facebook.com/KQED",
"https://twitter.com/KQED",
"https://www.instagram.com/kqed/",
"https://www.tiktok.com/@kqedofficial",
"https://www.linkedin.com/company/kqed",
"https://www.youtube.com/channel/UCeC0IOo7i1P_61zVUWbJ4nw"
]
}
}
},
"sticky": false,
"nprStoryId": "kqed-12037518",
"templateType": "standard",
"featuredImageType": "standard",
"excludeFromSiteSearch": "Include",
"articleAge": "0",
"path": "/news/12037518/newsom-signals-to-california-privacy-watchdog-that-hes-on-big-techs-side-on-ai-regulation",
"audioTrackLength": null,
"parsedContent": [
{
"type": "contentString",
"content": "\u003cdiv class=\"post-body\">\u003cp>\u003cp>Gov. Gavin Newsom is urging the California Privacy Protection Agency to regulate the state’s AI sector with a light touch.\u003c/p>\n\u003cp>Newsom, in an unusual letter to the California Privacy Protection Agency’s board, urged the regulators to dial back \u003ca href=\"https://cppa.ca.gov/regulations/pdf/ccpa_updates_cyber_risk_admt_ins_text.pdf\" target=\"_blank\" rel=\"noopener\">proposed regulations (PDF)\u003c/a> on automated decision-making technology.\u003c/p>\n\u003cp>“As my office has relayed to Agency staff over the last year, enacting these regulations could create significant unintended consequences and impose substantial costs,” the governor wrote in a letter \u003ca href=\"https://cdn.kqed.org/wp-content/uploads/sites/10/2025/04/CPPA-Letter.pdf\" target=\"_blank\" rel=\"noopener\">obtained by KQED (PDF)\u003c/a>.\u003c/p>\n\u003cp>\u003c/p>\u003c/div>",
"attributes": {
"named": {},
"numeric": []
}
},
{
"type": "component",
"content": "",
"name": "ad",
"attributes": {
"named": {
"label": "fullwidth"
},
"numeric": [
"fullwidth"
]
}
},
{
"type": "contentString",
"content": "\u003cdiv class=\"post-body\">\u003cp>\u003c/p>\n\u003cp>He went on to write, “The Agency can fulfill its obligations to issue the regulations called for by \u003ca href=\"https://www.kqed.org/news/11844163/proposition-24-californians-say-yes-to-expanding-on-nations-toughest-data-privacy-law\">Proposition 24\u003c/a> without venturing into areas beyond its mandate. Thank you for working in partnership with my Administration and the Legislature to balance privacy protection with clear and implementable guidelines that allow regulated entities to innovate responsibly, creating a fairer and more trustworthy digital environment for California consumers.”\u003c/p>\n\u003cp class=\"x_MsoNormal\">“The CPPA Board and staff continue to refine the draft regulations and will further discuss them at the May 1st board meeting,” Tom Kemp, CPPA’s executive director, wrote in response. He added, “We are grateful for the Governor’s Office’s continued engagement around this important issue.”\u003c/p>\n\u003cp>“It’s unfortunate to see a lot of the industry talking points coming out of a letter from the governor,” said Jake Snow, a technology and civil liberties attorney at the ACLU of Northern California. “The agency has a really broad authority to put in place new rules and the regulations that they’ve written are simple rules that encourage transparency and trust in AI for people in California.”\u003c/p>\n\u003cp>There’s really nothing like the California Privacy Protection Agency anywhere in the United States. Created in 2020, the agency is just beginning to find its voice, and that means “increasingly, it is attracting lobbying attention from industry,” said Jonathan Mehta Stein, chair of the \u003ca href=\"https://cited.tech\">California Initiative for Technology and Democracy\u003c/a>.\u003c/p>\n\u003cp>\u003c/p>\u003c/div>",
"attributes": {
"named": {},
"numeric": []
}
},
{
"type": "component",
"content": "",
"name": "aside",
"attributes": {
"named": {
"postid": "news_12036593",
"hero": "https://cdn.kqed.org/wp-content/uploads/sites/10/2025/04/GoogleGetty-1020x750.jpg",
"label": ""
},
"numeric": []
}
},
{
"type": "contentString",
"content": "\u003cdiv class=\"post-body\">\u003cp>\u003c/p>\n\u003cp>The \u003ca href=\"https://cppa.ca.gov/regulations/pdf/ccpa_updates_cyber_risk_admt_ins_text.pdf\" target=\"_blank\" rel=\"noopener\">draft regulations\u003c/a> would require businesses to assess and report privacy risks, perform annual cybersecurity audits, and give consumers more control over how automated systems (like AI and profiling tools) use their personal data. Public comment for the draft regulations closed on February 19. The board discussed those comments at the April board meeting, and they’ll discuss again on May 1.\u003c/p>\n\u003cp>The broad scope of the conversation brought out a fulsome array of interested parties, including not just the governor, but industry lobbyists and consumer advocates as well.\u003c/p>\n\u003cp>“AI, social media and data privacy are fundamentally intertwined, and if we are going to protect consumers and our democracy, from these combined, interwoven threats, you have to be talking about all of them all at once,” Stein said. “Right now, social media and AI are almost totally unregulated.\u003c/p>\n\u003cp>“California has made some good starts on data privacy in some recent bills in recent years, but there is almost no industry I can think of that has an impact on our lives so enormous, and sits under a regulatory regime so light and so minimal.”\u003c/p>\n\u003cp>Newsom has a reputation in Sacramento for lending a friendly ear to industry concerns. 
He has killed a couple of the most controversial AI bills, like one that would have required large-scale AI developers to submit their \u003ca href=\"https://www.kqed.org/news/12007087/california-blinks-governor-newsom-vetoes-ai-bill-aimed-at-catastrophic-harms\">safety plans\u003c/a> to the state attorney general, and two that would have forced tech platforms to \u003ca href=\"https://www.kqed.org/news/12001227/newsom-strikes-deal-with-google-and-openai-to-support-california-newsrooms\">share ad revenues with news\u003c/a> organizations.\u003c/p>\n\u003cp>However, Newsom has also signed many bills that consumer advocates like, addressing everything from online privacy to critical infrastructure.\u003c/p>\n\u003cp>At a board meeting \u003ca href=\"https://www.youtube.com/watch?v=qvRonzmjUgY\">three weeks ago\u003c/a>, CPPA Board member Alastair Mactaggart worried that moving forward aggressively could trigger industry lawsuits designed to bury the agency’s small staff in paperwork.\u003c/p>\n\u003cp>Or Silicon Valley lobbyists might appeal to President Donald Trump and the Republican-controlled Congress to preempt California’s privacy protections with weaker federal rules. However, it’s not clear how friendly that audience would be, given the federal government’s \u003ca href=\"https://www.kqed.org/author/rachael-myrow\">continued aggressive legal assaults\u003c/a> against Google and Meta.\u003c/p>\n\u003cp>“Rules around artificial intelligence are really a part of privacy law, because they govern the control that people should have over the use of information about them, and the use of that information that affects people’s lives,” said Snow, urging the board to move forward on “common sense restrictions on this technology.” What defines “common sense,” however, is a matter of continued debate.\u003c/p>\n\u003cp>The CPPA board has a November deadline to finalize the rules.\u003c/p>\n\u003cp>\u003c/p>\n\u003c/div>\u003c/p>",
"attributes": {
"named": {},
"numeric": []
}
}
],
"link": "/news/12037518/newsom-signals-to-california-privacy-watchdog-that-hes-on-big-techs-side-on-ai-regulation",
"authors": [
"251"
],
"categories": [
"news_8",
"news_13",
"news_248"
],
"tags": [
"news_25184",
"news_34755",
"news_18538",
"news_16",
"news_33542",
"news_34586",
"news_1631"
],
"featImg": "news_12037606",
"label": "news"
},
"news_12034916": {
"type": "posts",
"id": "news_12034916",
"meta": {
"index": "posts_1716263798",
"site": "news",
"id": "12034916",
"score": null,
"sort": [
1744214458000
]
},
"guestAuthors": [],
"slug": "about-benefiting-humanity-calls-grow-for-openai-to-make-good-on-its-promises",
"title": "Calls Grow for OpenAI to Make Good on Its Promises to Serve Public Good",
"publishDate": 1744214458,
"format": "audio",
"headTitle": "Calls Grow for OpenAI to Make Good on Its Promises to Serve Public Good | KQED",
"labelTerm": {
"site": "news"
},
"content": "\u003cp>More than 50 California labor and nonprofit organizations are asking Attorney General Rob Bonta to make sure OpenAI properly accounts for the public good as it seeks to transition from a nonprofit to a for-profit company.\u003c/p>\n\u003cp>“Artificial intelligence institution OpenAI, Inc. is a nonprofit entity that has failed to protect its charitable assets, allowing these charitable assets to be diverted for private profit and subverting its charitable mission to advance safe artificial intelligence,” reads the letter, released on Wednesday. “Petitioners request that the Attorney General act now in the instant case to again protect billions of dollars that are under threat as profit-driven hunger for power yields conflicts of interest.”\u003c/p>\n\u003cp>[ad fullwidth]\u003c/p>\n\u003cp>OpenAI’s conversion bid was launched to cash in on tens of billions of dollars promised by the \u003ca href=\"https://www.theguardian.com/technology/2025/apr/01/openai-raises-up-to-us40bn-in-deal-with-softbank\">SoftBank Group\u003c/a>, contingent on OpenAI transforming itself into a for-profit corporation.\u003c/p>\n\u003cp>“As we enter 2025, we will have to become more than a lab and a startup — we have to become an enduring company,” the company wrote in a \u003ca href=\"https://openai.com/index/why-our-structure-must-evolve-to-advance-our-mission/?utm_source=chatgpt.com\">blog post\u003c/a> last December. “Our plan is to transform our existing for-profit into a Delaware Public Benefit Corporation (PBC) with ordinary shares of stock and the OpenAI mission as its public benefit interest. 
The PBC is a \u003ca href=\"https://www.theinformation.com/articles/musks-xai-incorporates-as-benefit-corporation-with-positive-impact-goal\">structure\u003c/a> \u003ca href=\"https://openai.com/index/elon-musk-wanted-an-openai-for-profit/\">used\u003c/a> \u003ca href=\"https://en.wikipedia.org/wiki/Inflection_AI\">by\u003c/a> \u003ca href=\"https://www.anthropic.com/company\">many\u003c/a> \u003ca href=\"https://corpgov.law.harvard.edu/2022/02/18/converting-to-a-delaware-public-benefit-corporation-lessons-from-experience/\">others\u003c/a> that requires the company to balance shareholder interests, stakeholder interests, and a public benefit interest in its decision-making. It will enable us to raise the necessary capital with conventional terms like others in this space.”\u003c/p>\n\u003cp>It’s a controversial deal, and the subject of at least one lawsuit, \u003ca href=\"https://www.nytimes.com/2025/03/05/technology/elon-musk-openai-profit-lawsuit.html\">from Elon Musk\u003c/a>.\u003c/p>\n\u003cp>But Fred Blackwell, CEO of the San Francisco Foundation, said he just wants OpenAI to follow through on its original promise to benefit humanity. “We are not opposed to any conversion of nonprofit to for-profit. Nor are we wagging our fingers around the good or bad of AI,” said Blackwell, who heads one of the largest community foundations in the country. “We really just want to make sure that the assets are properly accounted for in this conversion. And that it’s an independent entity that comes out the other end.”\u003c/p>\n\u003cp>[aside postID=news_12034490 hero='https://cdn.kqed.org/wp-content/uploads/sites/10/2025/04/PadillaMentalHealthCaucus-1020x680.jpg']\u003c/p>\n\u003cp>In response to the letter, an Open AI spokesman wrote, “Our Board has been very clear that we intend to strengthen the non-profit so that it can deliver on its mission for the long term. We’re not selling it, we’re doubling down on its work. 
We look forward to the input and advice from leaders who have experience in community-based organizations on how we can help them achieve their missions, \u003ca href=\"https://url.us.m.mimecastprotect.com/s/RGZyCJ6PLru1rWp2TVfjcy1U6u?domain=openai.com\">as recently announced\u003c/a> by the creation of our advisory Commission.”\u003c/p>\n\u003cp>The AG’s office is already investigating. \u003ca href=\"https://oag.ca.gov/system/files/attachments/press-docs/Letter%20to%20OpenAI%20Inc%20.pdf\">In a letter sent to the ChatGPT maker\u003c/a> last December, Deputy Attorney General Christopher Lamerdin cited clauses in OpenAI’s articles of incorporation under which “OpenAI’s assets are irrevocably dedicated to its charitable purpose.”\u003c/p>\n\u003cp>Samuel Altman, CEO of OpenAI, testified during a Senate Judiciary Subcommittee on Privacy, Technology, and the Law oversight hearing on AI in 2023.\u003c/p>\n\u003cp>At the time, Altman urged U.S. senators to \u003ca href=\"https://www.npr.org/2024/02/21/1233024001/as-congress-lags-california-lawmakers-take-on-ai-regulations\">pass laws\u003c/a> to force accountability from the big players like Amazon, Google and OpenAI investor Microsoft. “There needs to be incredible scrutiny on us and our competitors,” Altman said. More recently, OpenAI has increased spending on \u003ca href=\"https://www.technologyreview.com/2025/01/21/1110260/openai-ups-its-lobbying-efforts-nearly-seven-fold/\">lobbying Congress sevenfold \u003c/a>and, for the \u003ca href=\"https://calmatters.org/economy/technology/2024/09/california-ai-safety-regulations-bills/\">first time, hired lobbyists\u003c/a> to oppose bills to regulate AI in Sacramento.\u003c/p>\n\u003cp>A state bill (\u003ca href=\"https://leginfo.legislature.ca.gov/faces/billNavClient.xhtml?bill_id=202520260AB501\">AB 501\u003c/a>) that would have blocked the conversion has been amended to focus on aircraft liens instead. 
“In the process of developing the legislation and doing the due diligence, it was determined that due to the complexity of the policy, additional time and resources to vet it and gather input were necessary,” David Burruto, district director for Assemblymember Diane Papan (D-San Mateo), wrote KQED.\u003c/p>\n\u003cp>“You have to smell lobbyists behind the scene trying to cut it down in some way or another,” said Gary Marcus, a leading AI expert, who testified to the Senate sitting next to Altman. He’s also written a blog post \u003ca href=\"https://garymarcus.substack.com/p/breaking-bill-that-would-have-blocked?r=8tdk6&utm_campaign=post&utm_medium=web&triedRedirect=true\">objecting\u003c/a> to the \u003ca href=\"https://www.sfexaminer.com/news/technology/california-bill-barring-openai-for-profit-transition-dead/article_27250d39-7577-47cc-a414-a7b13e5f6ce0.html\">gut-and-amend\u003c/a> of AB 501. “He said he was for regulation. And then, OpenAI has been lobbying behind the scenes against many regulations. So there’s a long history of this company making promises that I don’t think really have anything to do with reality. I’m not sure they ever meant them.”\u003c/p>\n\u003cp>\u003c/p>\n",
"blocks": [],
"excerpt": "As OpenAI converts from a nonprofit to a for-profit model, labor and nonprofit organizations are raising concerns and calling on Attorney General Rob Bonta to hold OpenAI accountable to its promise to benefit humanity.",
"status": "publish",
"parent": 0,
"modified": 1744311254,
"stats": {
"hasAudio": false,
"hasVideo": false,
"hasChartOrMap": false,
"iframeSrcs": [],
"hasGoogleForm": false,
"hasGallery": false,
"hasHearkenModule": false,
"hasPolis": false,
"paragraphCount": 15,
"wordCount": 800
},
"headData": {
"title": "Calls Grow for OpenAI to Make Good on Its Promises to Serve Public Good | KQED",
"description": "As OpenAI converts from a nonprofit to a for-profit model, labor and nonprofit organizations are raising concerns and calling on Attorney General Rob Bonta to hold OpenAI accountable to its promise to benefit humanity.",
"ogTitle": "",
"ogDescription": "",
"ogImgId": "",
"twTitle": "",
"twDescription": "",
"twImgId": "",
"schema": {
"@context": "https://schema.org",
"@type": "NewsArticle",
"headline": "Calls Grow for OpenAI to Make Good on Its Promises to Serve Public Good",
"datePublished": "2025-04-09T09:00:58-07:00",
"dateModified": "2025-04-10T11:54:14-07:00",
"image": "https://cdn.kqed.org/wp-content/uploads/2020/02/KQED-OG-Image@1x.png",
"isAccessibleForFree": "True",
"publisher": {
"@type": "NewsMediaOrganization",
"@id": "https://www.kqed.org/#organization",
"name": "KQED",
"logo": "https://cdn.kqed.org/wp-content/uploads/2020/02/KQED-OG-Image@1x.png",
"url": "https://www.kqed.org",
"sameAs": [
"https://www.facebook.com/KQED",
"https://twitter.com/KQED",
"https://www.instagram.com/kqed/",
"https://www.tiktok.com/@kqedofficial",
"https://www.linkedin.com/company/kqed",
"https://www.youtube.com/channel/UCeC0IOo7i1P_61zVUWbJ4nw"
]
}
}
},
"sourceUrl": "https://traffic.omny.fm/d/clips/0af137ef-751e-4b19-a055-aaef00d2d578/ffca7e9f-6831-4[…]f-aaef00f5a073/912cd12b-b28a-4933-ba05-b2bb011b000b/audio.mp3",
"audioUrl": "https://traffic.omny.fm/d/clips/0af137ef-751e-4b19-a055-aaef00d2d578/ffca7e9f-6831-4[…]f-aaef00f5a073/912cd12b-b28a-4933-ba05-b2bb011b000b/audio.mp3",
"sticky": false,
"nprStoryId": "kqed-12034916",
"templateType": "standard",
"featuredImageType": "standard",
"excludeFromSiteSearch": "Include",
"articleAge": "0",
"path": "/news/12034916/about-benefiting-humanity-calls-grow-for-openai-to-make-good-on-its-promises",
"audioTrackLength": null,
"parsedContent": [
{
"type": "contentString",
"content": "\u003cdiv class=\"post-body\">\u003cp>\u003cp>More than 50 California labor and nonprofit organizations are asking Attorney General Rob Bonta to make sure OpenAI properly accounts for the public good as it seeks to transition from a nonprofit to a for-profit company.\u003c/p>\n\u003cp>“Artificial intelligence institution OpenAI, Inc. is a nonprofit entity that has failed to protect its charitable assets, allowing these charitable assets to be diverted for private profit and subverting its charitable mission to advance safe artificial intelligence,” reads the letter, released on Wednesday. “Petitioners request that the Attorney General act now in the instant case to again protect billions of dollars that are under threat as profit-driven hunger for power yields conflicts of interest.”\u003c/p>\n\u003cp>\u003c/p>\u003c/div>",
"attributes": {
"named": {},
"numeric": []
}
},
{
"type": "component",
"content": "",
"name": "ad",
"attributes": {
"named": {
"label": "fullwidth"
},
"numeric": [
"fullwidth"
]
}
},
{
"type": "contentString",
"content": "\u003cdiv class=\"post-body\">\u003cp>\u003c/p>\n\u003cp>OpenAI’s conversion bid was launched to cash in on tens of billions of dollars promised by the \u003ca href=\"https://www.theguardian.com/technology/2025/apr/01/openai-raises-up-to-us40bn-in-deal-with-softbank\">SoftBank Group\u003c/a>, contingent on OpenAI transforming itself into a for-profit corporation.\u003c/p>\n\u003cp>“As we enter 2025, we will have to become more than a lab and a startup — we have to become an enduring company,” the company wrote in a \u003ca href=\"https://openai.com/index/why-our-structure-must-evolve-to-advance-our-mission/?utm_source=chatgpt.com\">blog post\u003c/a> last December. “Our plan is to transform our existing for-profit into a Delaware Public Benefit Corporation (PBC) with ordinary shares of stock and the OpenAI mission as its public benefit interest. The PBC is a \u003ca href=\"https://www.theinformation.com/articles/musks-xai-incorporates-as-benefit-corporation-with-positive-impact-goal\">structure\u003c/a> \u003ca href=\"https://openai.com/index/elon-musk-wanted-an-openai-for-profit/\">used\u003c/a> \u003ca href=\"https://en.wikipedia.org/wiki/Inflection_AI\">by\u003c/a> \u003ca href=\"https://www.anthropic.com/company\">many\u003c/a> \u003ca href=\"https://corpgov.law.harvard.edu/2022/02/18/converting-to-a-delaware-public-benefit-corporation-lessons-from-experience/\">others\u003c/a> that requires the company to balance shareholder interests, stakeholder interests, and a public benefit interest in its decision-making. 
It will enable us to raise the necessary capital with conventional terms like others in this space.”\u003c/p>\n\u003cp>It’s a controversial deal, and the subject of at least one lawsuit, \u003ca href=\"https://www.nytimes.com/2025/03/05/technology/elon-musk-openai-profit-lawsuit.html\">from Elon Musk\u003c/a>.\u003c/p>\n\u003cp>But Fred Blackwell, CEO of the San Francisco Foundation, said he just wants OpenAI to follow through on its original promise to benefit humanity. “We are not opposed to any conversion of nonprofit to for-profit. Nor are we wagging our fingers around the good or bad of AI,” said Blackwell, who heads one of the largest community foundations in the country. “We really just want to make sure that the assets are properly accounted for in this conversion. And that it’s an independent entity that comes out the other end.”\u003c/p>\n\u003cp>\u003c/p>\u003c/div>",
"attributes": {
"named": {},
"numeric": []
}
},
{
"type": "component",
"content": "",
"name": "aside",
"attributes": {
"named": {
"postid": "news_12034490",
"hero": "https://cdn.kqed.org/wp-content/uploads/sites/10/2025/04/PadillaMentalHealthCaucus-1020x680.jpg",
"label": ""
},
"numeric": []
}
},
{
"type": "contentString",
"content": "\u003cdiv class=\"post-body\">\u003cp>\u003c/p>\n\u003cp>In response to the letter, an Open AI spokesman wrote, “Our Board has been very clear that we intend to strengthen the non-profit so that it can deliver on its mission for the long term. We’re not selling it, we’re doubling down on its work. We look forward to the input and advice from leaders who have experience in community-based organizations on how we can help them achieve their missions, \u003ca href=\"https://url.us.m.mimecastprotect.com/s/RGZyCJ6PLru1rWp2TVfjcy1U6u?domain=openai.com\">as recently announced\u003c/a> by the creation of our advisory Commission.”\u003c/p>\n\u003cp>The AG’s office is already investigating. \u003ca href=\"https://oag.ca.gov/system/files/attachments/press-docs/Letter%20to%20OpenAI%20Inc%20.pdf\">In a letter sent to the ChatGPT maker\u003c/a> last December, Deputy Attorney General Christopher Lamerdin cited clauses in OpenAI’s articles of incorporation under which “OpenAI’s assets are irrevocably dedicated to its charitable purpose.”\u003c/p>\n\u003cp>Samuel Altman, CEO of OpenAI, testified during a Senate Judiciary Subcommittee on Privacy, Technology, and the Law oversight hearing on AI in 2023.\u003c/p>\n\u003cp>At the time, Altman urged U.S. senators to \u003ca href=\"https://www.npr.org/2024/02/21/1233024001/as-congress-lags-california-lawmakers-take-on-ai-regulations\">pass laws\u003c/a> to force accountability from the big players like Amazon, Google and OpenAI investor Microsoft. “There needs to be incredible scrutiny on us and our competitors,” Altman said. 
More recently, OpenAI has increased spending on \u003ca href=\"https://www.technologyreview.com/2025/01/21/1110260/openai-ups-its-lobbying-efforts-nearly-seven-fold/\">lobbying Congress sevenfold \u003c/a>and, for the \u003ca href=\"https://calmatters.org/economy/technology/2024/09/california-ai-safety-regulations-bills/\">first time, hired lobbyists\u003c/a> to oppose bills to regulate AI in Sacramento.\u003c/p>\n\u003cp>A state bill (\u003ca href=\"https://leginfo.legislature.ca.gov/faces/billNavClient.xhtml?bill_id=202520260AB501\">AB 501\u003c/a>) that would have blocked the conversion has been amended to focus on aircraft liens instead. “In the process of developing the legislation and doing the due diligence, it was determined that due to the complexity of the policy, additional time and resources to vet it and gather input were necessary,” David Burruto, district director for Assemblymember Diane Papan (D-San Mateo), wrote KQED.\u003c/p>\n\u003cp>“You have to smell lobbyists behind the scene trying to cut it down in some way or another,” said Gary Marcus, a leading AI expert, who testified to the Senate sitting next to Altman. He’s also written a blog post \u003ca href=\"https://garymarcus.substack.com/p/breaking-bill-that-would-have-blocked?r=8tdk6&utm_campaign=post&utm_medium=web&triedRedirect=true\">objecting\u003c/a> to the \u003ca href=\"https://www.sfexaminer.com/news/technology/california-bill-barring-openai-for-profit-transition-dead/article_27250d39-7577-47cc-a414-a7b13e5f6ce0.html\">gut-and-amend\u003c/a> of AB 501. “He said he was for regulation. And then, OpenAI has been lobbying behind the scenes against many regulations. So there’s a long history of this company making promises that I don’t think really have anything to do with reality. I’m not sure they ever meant them.”\u003c/p>\n\u003cp>\u003c/p>\n\u003c/div>\u003c/p>",
"attributes": {
"named": {},
"numeric": []
}
}
],
"link": "/news/12034916/about-benefiting-humanity-calls-grow-for-openai-to-make-good-on-its-promises",
"authors": [
"251"
],
"categories": [
"news_31795",
"news_8",
"news_248"
],
"tags": [
"news_32664",
"news_1386",
"news_3424",
"news_33542",
"news_34586",
"news_21285",
"news_1631"
],
"featImg": "news_11976118",
"label": "news"
},
"news_12034490": {
"type": "posts",
"id": "news_12034490",
"meta": {
"index": "posts_1716263798",
"site": "news",
"id": "12034490",
"score": null,
"sort": [
1743804122000
]
},
"guestAuthors": [],
"slug": "ai-companions-seductive-risk-teens-senators-want-more-guardrails",
"title": "AI Companions Can Be a Seductive Risk for Teens. Senators Want More Guardrails.",
"publishDate": 1743804122,
"format": "audio",
"headTitle": "AI Companions Can Be a Seductive Risk for Teens. Senators Want More Guardrails. | KQED",
"labelTerm": {
"site": "news"
},
"content": "\u003cp>After Megan Garcia’s 14-year-old son died by suicide last year, she said she thought he was spending the bulk of the time on his phone “talking to friends, playing games, looking at sports: the regular things that \u003ca href=\"https://www.kqed.org/news/12000954/smartphone-bans-havent-worked-in-california-schools-but-some-districts-share-advice-on-what-may-work\">teenagers do on their cellphones\u003c/a>.”\u003c/p>\n\u003cp>Instead, the Florida teen was having conversations with an artificial intelligence chatbot — and growing emotionally connected to it. \u003ca href=\"https://youtu.be/FCXWgZjybm0?si=khMUy1ByyRr6R9YI\">Speaking to “CBS Mornings”\u003c/a> shortly after suing Bay Area-based \u003ca href=\"http://character.ai\">Character.AI\u003c/a> (C.AI) and Google last fall, Garcia said she was blindsided by the intensity of the interactions her son had with C.AI’s chatbot shortly before his death.\u003c/p>\n\u003cp>“I didn’t know that he was talking to a very human-like AI chatbot that has the ability to mimic human emotion and human sentiment,” said Garcia, who is a lawyer. “It makes me sad that this was my child’s first experience being in love or romance. That’s saddening to me.”\u003c/p>\n\u003cp>Garcia blames C.AI for her son’s death.\u003c/p>\n\u003cp>[ad fullwidth]\u003c/p>\n\u003cp>This week, U.S. Sen. Alex Padilla, D-Calif., co-founder of the bipartisan \u003ca href=\"https://www.padilla.senate.gov/mhc/\">Senate Mental Health Caucus\u003c/a>, and Sen. Peter Welch, D-Vt., sent letters to the CEOs of the companies behind C.AI and two other leading AI chatbots, Chai and Replika, urging them to do more to ensure their products do not contribute to self-harm or suicide among young users.\u003c/p>\n\u003cp>Although a couple of the companies recently announced new safety features, Padilla and Welch insist the reliability of these systems is unclear. 
That’s even though surveys show teens are looking to AI for answers about their \u003ca href=\"https://www.kqed.org/mindshift/64014/teens-are-looking-to-ai-for-answers-about-their-personal-lives-not-just-homework-help\">personal lives\u003c/a>, not just help with homework.\u003c/p>\n\u003cp>“The synthetic attention users receive from these chatbots (e.g., streams of expressive messages, sycophantic and agreeable responses, AI-generated selfies, and convincing voice calls) can, and has already, led to dangerous levels of attachment and unearned trust,” the senators wrote in the letters to \u003ca href=\"https://www.padilla.senate.gov/wp-content/uploads/AI-Chatbot-Safety-Character-Technologies.pdf\">Character Technologies\u003c/a> of Menlo Park, \u003ca href=\"https://www.padilla.senate.gov/wp-content/uploads/AI-Chatbot-Safety-Chai.pdf\">Chai Research\u003c/a> of Palo Alto, and \u003ca href=\"https://www.padilla.senate.gov/wp-content/uploads/AI-Chatbot-Safety-Replika.pdf\">Luka\u003c/a> of San Francisco.[aside postID=science_1996504 hero='https://cdn.kqed.org/wp-content/uploads/sites/35/2025/03/IMG_0962-1020x765.jpg']“Policymakers, parents, and their kids deserve to know what your companies are doing to protect users from these known risks,” the senators wrote, “given that young people are accessing your products — where the average user spends approximately 60–90 minutes per day interacting with these AI chatbots.”\u003c/p>\n\u003cp>AI companionship apps tend to be more permissive than better-known general apps like ChatGPT, Claude and Gemini. That’s because companionship app users are often looking to engage with them as sexual and/or romantic partners. On the Character.AI \u003ca href=\"https://www.reddit.com/r/CharacterAI/\">subreddit\u003c/a>, it doesn’t take a long search to find questions like: “How many of you here use character.ai for loneliness? 
I’ve had no friends or social life for about 10 years and rarely leave my house, character.ai has really helped me feel a little bit better.”\u003c/p>\n\u003cp>On Character.AI, users can create their own chatbots and give them directions about how they should act. They can also select from chatbots created by others that mimic historical figures and celebrities. The Florida teen, for instance, used a bot mimicking the “Game of Thrones” character \u003ca href=\"https://www.youtube.com/watch?v=YbuBfizSnPk\">Daenerys Targaryen\u003c/a>.\u003c/p>\n\u003cp>“AI companions are kind of a sleeper issue for a lot of Americans,” said Danny Weiss, the chief advocacy officer for Common Sense Media. “Many parents don’t even know that their kids might be developing relationships with machines.”\u003c/p>\n\u003cp>Chelsea Harrison, head of communications at Character.AI, told KQED that the company welcomes working with regulators and lawmakers and has been in contact with Padilla’s and Welch’s offices.\u003c/p>\n\u003cp>“Over the past year, we’ve rolled out many safety features on the platform, including \u003ca href=\"https://blog.character.ai/introducing-parental-insights-enhanced-safety-for-teens/\">Parental Insights\u003c/a>, which provides parents and guardians access to a summary of their teen’s activity on the platform,” Harrison wrote.\u003c/p>\n\u003cp>Harrison added that the company serves a separate experience to teenagers “that is designed to further reduce the likelihood of users encountering, or prompting the model to return, sensitive or suggestive content.”\u003c/p>\n\u003cp>While it’s unlikely Democratic lawmakers will be able to move the Republican-led Congress to regulate AI, Weiss applauded Padilla and Welch for drawing attention to the issue and noted the dozens of AI-related bills introduced in Sacramento have a greater chance of making it into law.\u003c/p>\n\u003cp>“Right now, there are no guardrails on artificial intelligence companions,” Weiss said. 
“That is ridiculous. This technology is amazingly powerful. It’s seductive. It’s exciting.”\u003c/p>\n\u003cp>Assemblymember Rebecca Bauer-Kahan, in partnership with Common Sense, introduced \u003ca href=\"https://leginfo.legislature.ca.gov/faces/billNavClient.xhtml?bill_id=202520260AB1064\">AB 1064\u003c/a>, which would establish a standards board to assess and regulate AI technologies used by children.\u003c/p>\n\u003cp>\u003ca href=\"https://leginfo.legislature.ca.gov/faces/billNavClient.xhtml?bill_id=202520260SB243\">Senate Bill 243\u003c/a>, introduced by Sen. Steve Padilla, D-San Diego, will be heard by the Senate Judiciary Committee this Tuesday. The measure, which Common Sense also supports, would require chatbot operators to implement critical safeguards to protect users from the addictive, isolating and influential aspects of AI chatbots.\u003c/p>\n\u003cp>Ahead of the hearing, Padilla will promote the bill with a press conference, where he’ll be joined by the bereft Florida mother, Megan Garcia.\u003c/p>\n\u003cp>\u003cem>To help a young person who may be struggling with depression or anxiety, dial 988 to reach the national Suicide and Crisis Lifeline\u003c/em>.\u003c/p>\n\u003cp>\u003c/p>\n",
"blocks": [],
"excerpt": "The two senators’ letters to Bay Area AI companies come after several families — including a Florida mom whose son died by suicide — sued Menlo Park startup Character.AI.",
"status": "publish",
"parent": 0,
"modified": 1752606738,
"stats": {
"hasAudio": false,
"hasVideo": false,
"hasChartOrMap": false,
"iframeSrcs": [],
"hasGoogleForm": false,
"hasGallery": false,
"hasHearkenModule": false,
"hasPolis": false,
"paragraphCount": 21,
"wordCount": 931
},
"headData": {
"title": "AI Companions Can Be a Seductive Risk for Teens. Senators Want More Guardrails. | KQED",
"description": "The two senators’ letters to Bay Area AI companies come after several families — including a Florida mom whose son died by suicide — sued Menlo Park startup Character.AI.",
"ogTitle": "",
"ogDescription": "",
"ogImgId": "",
"twTitle": "",
"twDescription": "",
"twImgId": "",
"schema": {
"@context": "https://schema.org",
"@type": "NewsArticle",
"headline": "AI Companions Can Be a Seductive Risk for Teens. Senators Want More Guardrails.",
"datePublished": "2025-04-04T15:02:02-07:00",
"dateModified": "2025-07-15T12:12:18-07:00",
"image": "https://cdn.kqed.org/wp-content/uploads/2020/02/KQED-OG-Image@1x.png",
"isAccessibleForFree": "True",
"publisher": {
"@type": "NewsMediaOrganization",
"@id": "https://www.kqed.org/#organization",
"name": "KQED",
"logo": "https://cdn.kqed.org/wp-content/uploads/2020/02/KQED-OG-Image@1x.png",
"url": "https://www.kqed.org",
"sameAs": [
"https://www.facebook.com/KQED",
"https://twitter.com/KQED",
"https://www.instagram.com/kqed/",
"https://www.tiktok.com/@kqedofficial",
"https://www.linkedin.com/company/kqed",
"https://www.youtube.com/channel/UCeC0IOo7i1P_61zVUWbJ4nw"
]
}
}
},
"primaryCategory": {
"termId": 8,
"slug": "news",
"name": "News"
},
"sourceUrl": "https://traffic.omny.fm/d/clips/0af137ef-751e-4b19-a055-aaef00d2d578/ffca7e9f-6831-4[…]f-aaef00f5a073/99267ba1-c565-4f8d-9d66-b2b801047bd4/audio.mp3",
"audioUrl": "https://traffic.omny.fm/d/clips/0af137ef-751e-4b19-a055-aaef00d2d578/ffca7e9f-6831-4[…]f-aaef00f5a073/99267ba1-c565-4f8d-9d66-b2b801047bd4/audio.mp3",
"sticky": false,
"nprStoryId": "kqed-12034490",
"templateType": "standard",
"featuredImageType": "standard",
"excludeFromSiteSearch": "Include",
"articleAge": "0",
"path": "/news/12034490/ai-companions-seductive-risk-teens-senators-want-more-guardrails",
"audioTrackLength": null,
"parsedContent": [
{
"type": "contentString",
"content": "\u003cdiv class=\"post-body\">\u003cp>\u003cp>After Megan Garcia’s 14-year-old son died by suicide last year, she said she thought he was spending the bulk of the time on his phone “talking to friends, playing games, looking at sports: the regular things that \u003ca href=\"https://www.kqed.org/news/12000954/smartphone-bans-havent-worked-in-california-schools-but-some-districts-share-advice-on-what-may-work\">teenagers do on their cellphones\u003c/a>.”\u003c/p>\n\u003cp>Instead, the Florida teen was having conversations with an artificial intelligence chatbot — and growing emotionally connected to it. \u003ca href=\"https://youtu.be/FCXWgZjybm0?si=khMUy1ByyRr6R9YI\">Speaking to “CBS Mornings”\u003c/a> shortly after suing Bay Area-based \u003ca href=\"http://character.ai\">Character.AI\u003c/a> (C.AI) and Google last fall, Garcia said she was blindsided by the intensity of the interactions her son had with C.AI’s chatbot shortly before his death.\u003c/p>\n\u003cp>“I didn’t know that he was talking to a very human-like AI chatbot that has the ability to mimic human emotion and human sentiment,” said Garcia, who is a lawyer. “It makes me sad that this was my child’s first experience being in love or romance. That’s saddening to me.”\u003c/p>\n\u003cp>Garcia blames C.AI for her son’s death.\u003c/p>\n\u003cp>\u003c/p>\u003c/div>",
"attributes": {
"named": {},
"numeric": []
}
},
{
"type": "component",
"content": "",
"name": "ad",
"attributes": {
"named": {
"label": "fullwidth"
},
"numeric": [
"fullwidth"
]
}
},
{
"type": "contentString",
"content": "\u003cdiv class=\"post-body\">\u003cp>\u003c/p>\n\u003cp>This week, U.S. Sen. Alex Padilla, D-Calif., co-founder of the bipartisan \u003ca href=\"https://www.padilla.senate.gov/mhc/\">Senate Mental Health Caucus\u003c/a>, and Sen. Peter Welch, D-Vt., sent letters to the CEOs of the companies behind C.AI and two other leading AI chatbots, Chai and Replika, urging them to do more to ensure their products do not contribute to self-harm or suicide among young users.\u003c/p>\n\u003cp>Although a couple of the companies recently announced new safety features, Padilla and Welch insist the reliability of these systems is unclear. That’s even though surveys show teens are looking to AI for answers about their \u003ca href=\"https://www.kqed.org/mindshift/64014/teens-are-looking-to-ai-for-answers-about-their-personal-lives-not-just-homework-help\">personal lives\u003c/a>, not just help with homework.\u003c/p>\n\u003cp>“The synthetic attention users receive from these chatbots (e.g., streams of expressive messages, sycophantic and agreeable responses, AI-generated selfies, and convincing voice calls) can, and has already, led to dangerous levels of attachment and unearned trust,” the senators wrote in the letters to \u003ca href=\"https://www.padilla.senate.gov/wp-content/uploads/AI-Chatbot-Safety-Character-Technologies.pdf\">Character Technologies\u003c/a> of Menlo Park, \u003ca href=\"https://www.padilla.senate.gov/wp-content/uploads/AI-Chatbot-Safety-Chai.pdf\">Chai Research\u003c/a> of Palo Alto, and \u003ca href=\"https://www.padilla.senate.gov/wp-content/uploads/AI-Chatbot-Safety-Replika.pdf\">Luka\u003c/a> of San Francisco.\u003c/p>\u003c/div>",
"attributes": {
"named": {},
"numeric": []
}
},
{
"type": "component",
"content": "",
"name": "aside",
"attributes": {
"named": {
"postid": "science_1996504",
"hero": "https://cdn.kqed.org/wp-content/uploads/sites/35/2025/03/IMG_0962-1020x765.jpg",
"label": ""
},
"numeric": []
}
},
{
"type": "contentString",
"content": "\u003cdiv class=\"post-body\">\u003cp>“Policymakers, parents, and their kids deserve to know what your companies are doing to protect users from these known risks,” the senators wrote, “given that young people are accessing your products — where the average user spends approximately 60–90 minutes per day interacting with these AI chatbots.”\u003c/p>\n\u003cp>AI companionship apps tend to be more permissive than better-known general apps like ChatGPT, Claude and Gemini. That’s because companionship app users are often looking to engage with them as sexual and/or romantic partners. On the Character.AI \u003ca href=\"https://www.reddit.com/r/CharacterAI/\">subreddit\u003c/a>, it doesn’t take a long search to find questions like: “How many of you here use character.ai for loneliness? I’ve had no friends or social life for about 10 years and rarely leave my house, character.ai has really helped me feel a little bit better.”\u003c/p>\n\u003cp>On Character.AI, users can create their own chatbots and give them directions about how they should act. They can also select from chatbots created by others that mimic historical figures and celebrities. The Florida teen, for instance, used a bot mimicking the “Game of Thrones” character \u003ca href=\"https://www.youtube.com/watch?v=YbuBfizSnPk\">Daenerys Targaryen\u003c/a>.\u003c/p>\n\u003cp>“AI companions are kind of a sleeper issue for a lot of Americans,” said Danny Weiss, the chief advocacy officer for Common Sense Media. 
“Many parents don’t even know that their kids might be developing relationships with machines.”\u003c/p>\n\u003cp>Chelsea Harrison, head of communications at Character.AI, told KQED that the company welcomes working with regulators and lawmakers and has been in contact with Padilla’s and Welch’s offices.\u003c/p>\n\u003cp>“Over the past year, we’ve rolled out many safety features on the platform, including \u003ca href=\"https://blog.character.ai/introducing-parental-insights-enhanced-safety-for-teens/\">Parental Insights\u003c/a>, which provides parents and guardians access to a summary of their teen’s activity on the platform,” Harrison wrote.\u003c/p>\n\u003cp>Harrison added that the company serves a separate experience to teenagers “that is designed to further reduce the likelihood of users encountering, or prompting the model to return, sensitive or suggestive content.”\u003c/p>\n\u003cp>While it’s unlikely Democratic lawmakers will be able to move the Republican-led Congress to regulate AI, Weiss applauded Padilla and Welch for drawing attention to the issue and noted the dozens of AI-related bills introduced in Sacramento have a greater chance of making it into law.\u003c/p>\n\u003cp>“Right now, there are no guardrails on artificial intelligence companions,” Weiss said. “That is ridiculous. This technology is amazingly powerful. It’s seductive. It’s exciting.”\u003c/p>\n\u003cp>Assemblymember Rebecca Bauer-Kahan, in partnership with Common Sense, introduced \u003ca href=\"https://leginfo.legislature.ca.gov/faces/billNavClient.xhtml?bill_id=202520260AB1064\">AB 1064\u003c/a>, which would establish a standards board to assess and regulate AI technologies used by children.\u003c/p>\n\u003cp>\u003ca href=\"https://leginfo.legislature.ca.gov/faces/billNavClient.xhtml?bill_id=202520260SB243\">Senate Bill 243\u003c/a>, introduced by Sen. Steve Padilla, D-San Diego, will be heard by the Senate Judiciary Committee this Tuesday. 
The measure, which Common Sense also supports, would require chatbot operators to implement critical safeguards to protect users from the addictive, isolating and influential aspects of AI chatbots.\u003c/p>\n\u003cp>Ahead of the hearing, Padilla will promote the bill with a press conference, where he’ll be joined by the bereft Florida mother, Megan Garcia.\u003c/p>\n\u003cp>\u003cem>To help a young person who may be struggling with depression or anxiety, dial 988 to reach the national Suicide and Crisis Lifeline\u003c/em>.\u003c/p>\n\u003cp>\u003c/p>\n\u003c/div>\u003c/p>",
"attributes": {
"named": {},
"numeric": []
}
}
],
"link": "/news/12034490/ai-companions-seductive-risk-teens-senators-want-more-guardrails",
"authors": [
"251"
],
"categories": [
"news_31795",
"news_8",
"news_248"
],
"tags": [
"news_25184",
"news_32664",
"news_19112",
"news_18538",
"news_32668",
"news_33542",
"news_34586",
"news_1631"
],
"featImg": "news_12034620",
"label": "news"
}
},
"programsReducer": {
"possible": {
"id": "possible",
"title": "Possible",
"info": "Possible is hosted by entrepreneur Reid Hoffman and writer Aria Finger. Together in Possible, Hoffman and Finger lead enlightening discussions about building a brighter collective future. The show features interviews with visionary guests like Trevor Noah, Sam Altman and Janette Sadik-Khan. Possible paints an optimistic portrait of the world we can create through science, policy, business, art and our shared humanity. It asks: What if everything goes right for once? How can we get there? Each episode also includes a short fiction story generated by advanced AI GPT-4, serving as a thought-provoking springboard to speculate how humanity could leverage technology for good.",
"airtime": "SUN 2pm",
"imageSrc": "https://cdn.kqed.org/wp-content/uploads/2024/04/Possible-Podcast-Tile-360x360-1.jpg",
"officialWebsiteLink": "https://www.possible.fm/",
"meta": {
"site": "news",
"source": "Possible"
},
"link": "/radio/program/possible",
"subscribe": {
"apple": "https://podcasts.apple.com/us/podcast/possible/id1677184070",
"spotify": "https://open.spotify.com/show/730YpdUSNlMyPQwNnyjp4k"
}
},
"1a": {
"id": "1a",
"title": "1A",
"info": "1A is home to the national conversation. 1A brings on great guests and frames the best debate in ways that make you think, share and engage.",
"airtime": "MON-THU 11pm-12am",
"imageSrc": "https://ww2.kqed.org/radio/wp-content/uploads/sites/50/2018/04/1a.jpg",
"officialWebsiteLink": "https://the1a.org/",
"meta": {
"site": "news",
"source": "npr"
},
"link": "/radio/program/1a",
"subscribe": {
"npr": "https://rpb3r.app.goo.gl/RBrW",
"apple": "https://itunes.apple.com/WebObjects/MZStore.woa/wa/viewPodcast?s=143441&mt=2&id=1188724250&at=11l79Y&ct=nprdirectory",
"tuneIn": "https://tunein.com/radio/1A-p947376/",
"rss": "https://feeds.npr.org/510316/podcast.xml"
}
},
"all-things-considered": {
"id": "all-things-considered",
"title": "All Things Considered",
"info": "Every weekday, \u003cem>All Things Considered\u003c/em> hosts Robert Siegel, Audie Cornish, Ari Shapiro, and Kelly McEvers present the program's trademark mix of news, interviews, commentaries, reviews, and offbeat features. Michel Martin hosts on the weekends.",
"airtime": "MON-FRI 1pm-2pm, 4:30pm-6:30pm\u003cbr />SAT-SUN 5pm-6pm",
"imageSrc": "https://cdn.kqed.org/wp-content/uploads/2024/04/All-Things-Considered-Podcast-Tile-360x360-1.jpg",
"officialWebsiteLink": "https://www.npr.org/programs/all-things-considered/",
"meta": {
"site": "news",
"source": "npr"
},
"link": "/radio/program/all-things-considered"
},
"american-suburb-podcast": {
"id": "american-suburb-podcast",
"title": "American Suburb: The Podcast",
"tagline": "The flip side of gentrification, told through one town",
"info": "Gentrification is changing cities across America, forcing people from neighborhoods they have long called home. Call them the displaced. Now those priced out of the Bay Area are looking for a better life in an unlikely place. American Suburb follows this migration to one California town along the Delta, 45 miles from San Francisco. But is this once sleepy suburb ready for them?",
"imageSrc": "https://cdn.kqed.org/wp-content/uploads/2024/04/American-Suburb-Podcast-Tile-703x703-1.jpg",
"officialWebsiteLink": "/news/series/american-suburb-podcast",
"meta": {
"site": "news",
"source": "kqed",
"order": 19
},
"link": "/news/series/american-suburb-podcast/",
"subscribe": {
"npr": "https://rpb3r.app.goo.gl/RBrW",
"apple": "https://itunes.apple.com/WebObjects/MZStore.woa/wa/viewPodcast?mt=2&id=1287748328",
"tuneIn": "https://tunein.com/radio/American-Suburb-p1086805/",
"rss": "https://ww2.kqed.org/news/series/american-suburb-podcast/feed/podcast",
"google": "https://podcasts.google.com/feed/aHR0cHM6Ly9mZWVkcy5tZWdhcGhvbmUuZm0vS1FJTkMzMDExODgxNjA5"
}
},
"baycurious": {
"id": "baycurious",
"title": "Bay Curious",
"tagline": "Exploring the Bay Area, one question at a time",
"info": "KQED’s new podcast, Bay Curious, gets to the bottom of the mysteries — both profound and peculiar — that give the Bay Area its unique identity. And we’ll do it with your help! You ask the questions. You decide what Bay Curious investigates. And you join us on the journey to find the answers.",
"imageSrc": "https://cdn.kqed.org/wp-content/uploads/2024/04/Bay-Curious-Podcast-Tile-703x703-1.jpg",
"imageAlt": "KQED Bay Curious",
"officialWebsiteLink": "/news/series/baycurious",
"meta": {
"site": "news",
"source": "kqed",
"order": 4
},
"link": "/podcasts/baycurious",
"subscribe": {
"apple": "https://podcasts.apple.com/us/podcast/bay-curious/id1172473406",
"npr": "https://www.npr.org/podcasts/500557090/bay-curious",
"rss": "https://ww2.kqed.org/news/category/bay-curious-podcast/feed/podcast",
"google": "https://podcasts.google.com/feed/aHR0cHM6Ly93dzIua3FlZC5vcmcvbmV3cy9jYXRlZ29yeS9iYXktY3VyaW91cy1wb2RjYXN0L2ZlZWQvcG9kY2FzdA",
"stitcher": "https://www.stitcher.com/podcast/kqed/bay-curious",
"spotify": "https://open.spotify.com/show/6O76IdmhixfijmhTZLIJ8k"
}
},
"bbc-world-service": {
"id": "bbc-world-service",
"title": "BBC World Service",
"info": "The day's top stories from BBC News compiled twice daily in the week, once at weekends.",
"airtime": "MON-FRI 9pm-10pm, TUE-FRI 1am-2am",
"imageSrc": "https://cdn.kqed.org/wp-content/uploads/2024/04/BBC-World-Service-Podcast-Tile-360x360-1.jpg",
"officialWebsiteLink": "https://www.bbc.co.uk/sounds/play/live:bbc_world_service",
"meta": {
"site": "news",
"source": "BBC World Service"
},
"link": "/radio/program/bbc-world-service",
"subscribe": {
"apple": "https://itunes.apple.com/us/podcast/global-news-podcast/id135067274?mt=2",
"tuneIn": "https://tunein.com/radio/BBC-World-Service-p455581/",
"rss": "https://podcasts.files.bbci.co.uk/p02nq0gn.rss"
}
},
"code-switch-life-kit": {
"id": "code-switch-life-kit",
"title": "Code Switch / Life Kit",
"info": "\u003cem>Code Switch\u003c/em>, which listeners will hear in the first part of the hour, has fearless and much-needed conversations about race. Hosted by journalists of color, the show tackles the subject of race head-on, exploring how it impacts every part of society — from politics and pop culture to history, sports and more.\u003cbr />\u003cbr />\u003cem>Life Kit\u003c/em>, which will be in the second part of the hour, guides you through spaces and feelings no one prepares you for — from finances to mental health, from workplace microaggressions to imposter syndrome, from relationships to parenting. The show features experts with real world experience and shares their knowledge. Because everyone needs a little help being human.\u003cbr />\u003cbr />\u003ca href=\"https://www.npr.org/podcasts/510312/codeswitch\">\u003cem>Code Switch\u003c/em> offical site and podcast\u003c/a>\u003cbr />\u003ca href=\"https://www.npr.org/lifekit\">\u003cem>Life Kit\u003c/em> offical site and podcast\u003c/a>\u003cbr />",
"airtime": "SUN 9pm-10pm",
"imageSrc": "https://cdn.kqed.org/wp-content/uploads/2024/04/Code-Switch-Life-Kit-Podcast-Tile-360x360-1.jpg",
"meta": {
"site": "radio",
"source": "npr"
},
"link": "/radio/program/code-switch-life-kit",
"subscribe": {
"apple": "https://podcasts.apple.com/podcast/1112190608?mt=2&at=11l79Y&ct=nprdirectory",
"google": "https://podcasts.google.com/feed/aHR0cHM6Ly93d3cubnByLm9yZy9yc3MvcG9kY2FzdC5waHA_aWQ9NTEwMzEy",
"spotify": "https://open.spotify.com/show/3bExJ9JQpkwNhoHvaIIuyV",
"rss": "https://feeds.npr.org/510312/podcast.xml"
}
},
"commonwealth-club": {
"id": "commonwealth-club",
"title": "Commonwealth Club of California Podcast",
"info": "The Commonwealth Club of California is the nation's oldest and largest public affairs forum. As a non-partisan forum, The Club brings to the public airwaves diverse viewpoints on important topics. The Club's weekly radio broadcast - the oldest in the U.S., dating back to 1924 - is carried across the nation on public radio stations and is now podcasting. Our website archive features audio of our recent programs, as well as selected speeches from our long and distinguished history. This podcast feed is usually updated twice a week and is always un-edited.",
"airtime": "THU 10pm, FRI 1am",
"imageSrc": "https://cdn.kqed.org/wp-content/uploads/2024/04/Commonwealth-Club-Podcast-Tile-360x360-1.jpg",
"officialWebsiteLink": "https://www.commonwealthclub.org/podcasts",
"meta": {
"site": "news",
"source": "Commonwealth Club of California"
},
"link": "/radio/program/commonwealth-club",
"subscribe": {
"apple": "https://itunes.apple.com/us/podcast/commonwealth-club-of-california-podcast/id976334034?mt=2",
"google": "https://podcasts.google.com/feed/aHR0cDovL3d3dy5jb21tb253ZWFsdGhjbHViLm9yZy9hdWRpby9wb2RjYXN0L3dlZWtseS54bWw",
"tuneIn": "https://tunein.com/radio/Commonwealth-Club-of-California-p1060/"
}
},
"forum": {
"id": "forum",
"title": "Forum",
"tagline": "The conversation starts here",
"info": "KQED’s live call-in program discussing local, state, national and international issues, as well as in-depth interviews.",
"airtime": "MON-FRI 9am-11am, 10pm-11pm",
"imageSrc": "https://cdn.kqed.org/wp-content/uploads/2024/04/Forum-Podcast-Tile-703x703-1.jpg",
"imageAlt": "KQED Forum with Mina Kim and Alexis Madrigal",
"officialWebsiteLink": "/forum",
"meta": {
"site": "news",
"source": "kqed",
"order": 10
},
"link": "/forum",
"subscribe": {
"apple": "https://podcasts.apple.com/us/podcast/kqeds-forum/id73329719",
"google": "https://podcasts.google.com/feed/aHR0cHM6Ly9mZWVkcy5tZWdhcGhvbmUuZm0vS1FJTkM5NTU3MzgxNjMz",
"npr": "https://www.npr.org/podcasts/432307980/forum",
"stitcher": "https://www.stitcher.com/podcast/kqedfm-kqeds-forum-podcast",
"rss": "https://feeds.megaphone.fm/KQINC9557381633"
}
},
"freakonomics-radio": {
"id": "freakonomics-radio",
"title": "Freakonomics Radio",
"info": "Freakonomics Radio is a one-hour award-winning podcast and public-radio project hosted by Stephen Dubner, with co-author Steve Levitt as a regular guest. It is produced in partnership with WNYC.",
"imageSrc": "https://ww2.kqed.org/news/wp-content/uploads/sites/10/2018/05/freakonomicsRadio.png",
"officialWebsiteLink": "http://freakonomics.com/",
"airtime": "SUN 1am-2am, SAT 3pm-4pm",
"meta": {
"site": "radio",
"source": "WNYC"
},
"link": "/radio/program/freakonomics-radio",
"subscribe": {
"npr": "https://rpb3r.app.goo.gl/4s8b",
"apple": "https://itunes.apple.com/us/podcast/freakonomics-radio/id354668519",
"tuneIn": "https://tunein.com/podcasts/WNYC-Podcasts/Freakonomics-Radio-p272293/",
"rss": "https://feeds.feedburner.com/freakonomicsradio"
}
},
"fresh-air": {
"id": "fresh-air",
"title": "Fresh Air",
"info": "Hosted by Terry Gross, \u003cem>Fresh Air from WHYY\u003c/em> is the Peabody Award-winning weekday magazine of contemporary arts and issues. One of public radio's most popular programs, Fresh Air features intimate conversations with today's biggest luminaries.",
"airtime": "MON-FRI 7pm-8pm",
"imageSrc": "https://cdn.kqed.org/wp-content/uploads/2024/04/Fresh-Air-Podcast-Tile-360x360-1.jpg",
"officialWebsiteLink": "https://www.npr.org/programs/fresh-air/",
"meta": {
"site": "radio",
"source": "npr"
},
"link": "/radio/program/fresh-air",
"subscribe": {
"npr": "https://rpb3r.app.goo.gl/4s8b",
"apple": "https://itunes.apple.com/WebObjects/MZStore.woa/wa/viewPodcast?s=143441&mt=2&id=214089682&at=11l79Y&ct=nprdirectory",
"tuneIn": "https://tunein.com/radio/Fresh-Air-p17/",
"rss": "https://feeds.npr.org/381444908/podcast.xml"
}
},
"here-and-now": {
"id": "here-and-now",
"title": "Here & Now",
"info": "A live production of NPR and WBUR Boston, in collaboration with stations across the country, Here & Now reflects the fluid world of news as it's happening in the middle of the day, with timely, in-depth news, interviews and conversation. Hosted by Robin Young, Jeremy Hobson and Tonya Mosley.",
"airtime": "MON-THU 11am-12pm",
"imageSrc": "https://cdn.kqed.org/wp-content/uploads/2024/04/Here-And-Now-Podcast-Tile-360x360-1.jpg",
"officialWebsiteLink": "http://www.wbur.org/hereandnow",
"meta": {
"site": "news",
"source": "npr"
},
"link": "/radio/program/here-and-now",
"subsdcribe": {
"apple": "https://itunes.apple.com/WebObjects/MZStore.woa/wa/viewPodcast?mt=2&id=426698661",
"tuneIn": "https://tunein.com/radio/Here--Now-p211/",
"rss": "https://feeds.npr.org/510051/podcast.xml"
}
},
"how-i-built-this": {
"id": "how-i-built-this",
"title": "How I Built This with Guy Raz",
"info": "Guy Raz dives into the stories behind some of the world's best known companies. How I Built This weaves a narrative journey about innovators, entrepreneurs and idealists—and the movements they built.",
"imageSrc": "https://ww2.kqed.org/news/wp-content/uploads/sites/10/2018/05/howIBuiltThis.png",
"officialWebsiteLink": "https://www.npr.org/podcasts/510313/how-i-built-this",
"airtime": "SUN 7:30pm-8pm",
"meta": {
"site": "news",
"source": "npr"
},
"link": "/radio/program/how-i-built-this",
"subscribe": {
"npr": "https://rpb3r.app.goo.gl/3zxy",
"apple": "https://itunes.apple.com/us/podcast/how-i-built-this-with-guy-raz/id1150510297?mt=2",
"tuneIn": "https://tunein.com/podcasts/Arts--Culture-Podcasts/How-I-Built-This-p910896/",
"rss": "https://feeds.npr.org/510313/podcast.xml"
}
},
"inside-europe": {
"id": "inside-europe",
"title": "Inside Europe",
"info": "Inside Europe, a one-hour weekly news magazine hosted by Helen Seeney and Keith Walker, explores the topical issues shaping the continent. No other part of the globe has experienced such dynamic political and social change in recent years.",
"airtime": "SAT 3am-4am",
"imageSrc": "https://cdn.kqed.org/wp-content/uploads/2024/04/Inside-Europe-Podcast-Tile-300x300-1.jpg",
"meta": {
"site": "news",
"source": "Deutsche Welle"
},
"link": "/radio/program/inside-europe",
"subscribe": {
"apple": "https://itunes.apple.com/us/podcast/inside-europe/id80106806?mt=2",
"tuneIn": "https://tunein.com/radio/Inside-Europe-p731/",
"rss": "https://partner.dw.com/xml/podcast_inside-europe"
}
},
"latino-usa": {
"id": "latino-usa",
"title": "Latino USA",
"airtime": "MON 1am-2am, SUN 6pm-7pm",
"info": "Latino USA, the radio journal of news and culture, is the only national, English-language radio program produced from a Latino perspective.",
"imageSrc": "https://ww2.kqed.org/radio/wp-content/uploads/sites/50/2018/04/latinoUsa.jpg",
"officialWebsiteLink": "http://latinousa.org/",
"meta": {
"site": "news",
"source": "npr"
},
"link": "/radio/program/latino-usa",
"subscribe": {
"npr": "https://rpb3r.app.goo.gl/xtTd",
"apple": "https://itunes.apple.com/WebObjects/MZStore.woa/wa/viewPodcast?s=143441&mt=2&id=79681317&at=11l79Y&ct=nprdirectory",
"tuneIn": "https://tunein.com/radio/Latino-USA-p621/",
"rss": "https://feeds.npr.org/510016/podcast.xml"
}
},
"live-from-here-highlights": {
"id": "live-from-here-highlights",
"title": "Live from Here Highlights",
"info": "Chris Thile steps to the mic as the host of Live from Here (formerly A Prairie Home Companion), a live public radio variety show. Download Chris’s Song of the Week plus other highlights from the broadcast. Produced by American Public Media.",
"airtime": "SAT 6pm-8pm, SUN 11am-1pm",
"imageSrc": "https://cdn.kqed.org/wp-content/uploads/2024/04/Live-From-Here-Podcast-Tile-360x360-1.jpg",
"officialWebsiteLink": "https://www.livefromhere.org/",
"meta": {
"site": "arts",
"source": "american public media"
},
"link": "/radio/program/live-from-here-highlights",
"subscribe": {
"apple": "https://itunes.apple.com/us/podcast/id1167173941",
"tuneIn": "https://tunein.com/radio/Live-from-Here-Highlights-p921744/",
"rss": "https://feeds.publicradio.org/public_feeds/a-prairie-home-companion-highlights/rss/rss"
}
},
"marketplace": {
"id": "marketplace",
"title": "Marketplace",
"info": "Our flagship program, helmed by Kai Ryssdal, examines what the day in money delivered, through stories, conversations, newsworthy numbers and more. Updated Monday through Friday at about 3:30 p.m. PT.",
"airtime": "MON-FRI 4pm-4:30pm, MON-WED 6:30pm-7pm",
"imageSrc": "https://cdn.kqed.org/wp-content/uploads/2024/04/Marketplace-Podcast-Tile-360x360-1.jpg",
"officialWebsiteLink": "https://www.marketplace.org/",
"meta": {
"site": "news",
"source": "American Public Media"
},
"link": "/radio/program/marketplace",
"subscribe": {
"apple": "https://itunes.apple.com/WebObjects/MZStore.woa/wa/viewPodcast?s=143441&mt=2&id=201853034&at=11l79Y&ct=nprdirectory",
"tuneIn": "https://tunein.com/radio/APM-Marketplace-p88/",
"rss": "https://feeds.publicradio.org/public_feeds/marketplace-pm/rss/rss"
}
},
"mindshift": {
"id": "mindshift",
"title": "MindShift",
"tagline": "A podcast about the future of learning and how we raise our kids",
"info": "The MindShift podcast explores the innovations in education that are shaping how kids learn. Hosts Ki Sung and Katrina Schwartz introduce listeners to educators, researchers, parents and students who are developing effective ways to improve how kids learn. We cover topics like how fed-up administrators are developing surprising tactics to deal with classroom disruptions; how listening to podcasts are helping kids develop reading skills; the consequences of overparenting; and why interdisciplinary learning can engage students on all ends of the traditional achievement spectrum. This podcast is part of the MindShift education site, a division of KQED News. KQED is an NPR/PBS member station based in San Francisco. You can also visit the MindShift website for episodes and supplemental blog posts or tweet us \u003ca href=\"https://twitter.com/MindShiftKQED\">@MindShiftKQED\u003c/a> or visit us at \u003ca href=\"/mindshift\">MindShift.KQED.org\u003c/a>",
"imageSrc": "https://cdn.kqed.org/wp-content/uploads/2024/04/Mindshift-Podcast-Tile-703x703-1.jpg",
"imageAlt": "KQED MindShift: How We Will Learn",
"officialWebsiteLink": "/mindshift/",
"meta": {
"site": "news",
"source": "kqed",
"order": 13
},
"link": "/podcasts/mindshift",
"subscribe": {
"apple": "https://podcasts.apple.com/us/podcast/mindshift-podcast/id1078765985",
"google": "https://podcasts.google.com/feed/aHR0cHM6Ly9mZWVkcy5tZWdhcGhvbmUuZm0vS1FJTkM1NzY0NjAwNDI5",
"npr": "https://www.npr.org/podcasts/464615685/mind-shift-podcast",
"stitcher": "https://www.stitcher.com/podcast/kqed/stories-teachers-share",
"spotify": "https://open.spotify.com/show/0MxSpNYZKNprFLCl7eEtyx"
}
},
"morning-edition": {
"id": "morning-edition",
"title": "Morning Edition",
"info": "\u003cem>Morning Edition\u003c/em> takes listeners around the country and the world with multi-faceted stories and commentaries every weekday. Hosts Steve Inskeep, David Greene and Rachel Martin bring you the latest breaking news and features to prepare you for the day.",
"airtime": "MON-FRI 3am-9am",
"imageSrc": "https://cdn.kqed.org/wp-content/uploads/2024/04/Morning-Edition-Podcast-Tile-360x360-1.jpg",
"officialWebsiteLink": "https://www.npr.org/programs/morning-edition/",
"meta": {
"site": "news",
"source": "npr"
},
"link": "/radio/program/morning-edition"
},
"onourwatch": {
"id": "onourwatch",
"title": "On Our Watch",
"tagline": "Deeply-reported investigative journalism",
"info": "For decades, the process for how police police themselves has been inconsistent – if not opaque. In some states, like California, these proceedings were completely hidden. After a new police transparency law unsealed scores of internal affairs files, our reporters set out to examine these cases and the shadow world of police discipline. On Our Watch brings listeners into the rooms where officers are questioned and witnesses are interrogated to find out who this system is really protecting. Is it the officers, or the public they've sworn to serve?",
"imageSrc": "https://cdn.kqed.org/wp-content/uploads/2024/04/On-Our-Watch-Podcast-Tile-703x703-1.jpg",
"imageAlt": "On Our Watch from NPR and KQED",
"officialWebsiteLink": "/podcasts/onourwatch",
"meta": {
"site": "news",
"source": "kqed",
"order": 12
},
"link": "/podcasts/onourwatch",
"subscribe": {
"apple": "https://podcasts.apple.com/podcast/id1567098962",
"google": "https://podcasts.google.com/feed/aHR0cHM6Ly9mZWVkcy5ucHIub3JnLzUxMDM2MC9wb2RjYXN0LnhtbD9zYz1nb29nbGVwb2RjYXN0cw",
"npr": "https://rpb3r.app.goo.gl/onourwatch",
"spotify": "https://open.spotify.com/show/0OLWoyizopu6tY1XiuX70x",
"tuneIn": "https://tunein.com/radio/On-Our-Watch-p1436229/",
"stitcher": "https://www.stitcher.com/show/on-our-watch",
"rss": "https://feeds.npr.org/510360/podcast.xml"
}
},
"on-the-media": {
"id": "on-the-media",
"title": "On The Media",
"info": "Our weekly podcast explores how the media 'sausage' is made, casts an incisive eye on fluctuations in the marketplace of ideas, and examines threats to the freedom of information and expression in America and abroad. For one hour a week, the show tries to lift the veil from the process of \"making media,\" especially news media, because it's through that lens that we see the world and the world sees us",
"airtime": "SUN 2pm-3pm, MON 12am-1am",
"imageSrc": "https://ww2.kqed.org/radio/wp-content/uploads/sites/50/2018/04/onTheMedia.png",
"officialWebsiteLink": "https://www.wnycstudios.org/shows/otm",
"meta": {
"site": "news",
"source": "wnyc"
},
"link": "/radio/program/on-the-media",
"subscribe": {
"apple": "https://itunes.apple.com/us/podcast/on-the-media/id73330715?mt=2",
"tuneIn": "https://tunein.com/radio/On-the-Media-p69/",
"rss": "http://feeds.wnyc.org/onthemedia"
}
},
"our-body-politic": {
"id": "our-body-politic",
"title": "Our Body Politic",
"info": "Presented by KQED, KCRW and KPCC, and created and hosted by award-winning journalist Farai Chideya, Our Body Politic is unapologetically centered on reporting on not just how women of color experience the major political events of today, but how they’re impacting those very issues.",
"airtime": "SAT 6pm-7pm, SUN 1am-2am",
"imageSrc": "https://cdn.kqed.org/wp-content/uploads/2024/04/Our-Body-Politic-Podcast-Tile-360x360-1.jpg",
"officialWebsiteLink": "https://our-body-politic.simplecast.com/",
"meta": {
"site": "news",
"source": "kcrw"
},
"link": "/radio/program/our-body-politic",
"subscribe": {
"apple": "https://podcasts.apple.com/us/podcast/our-body-politic/id1533069868",
"google": "https://podcasts.google.com/feed/aHR0cHM6Ly9mZWVkcy5zaW1wbGVjYXN0LmNvbS9feGFQaHMxcw",
"spotify": "https://open.spotify.com/show/4ApAiLT1kV153TttWAmqmc",
"rss": "https://feeds.simplecast.com/_xaPhs1s",
"tuneIn": "https://tunein.com/podcasts/News--Politics-Podcasts/Our-Body-Politic-p1369211/"
}
},
"pbs-newshour": {
"id": "pbs-newshour",
"title": "PBS NewsHour",
"info": "Analysis, background reports and updates from the PBS NewsHour putting today's news in context.",
"airtime": "MON-FRI 3pm-4pm",
"imageSrc": "https://cdn.kqed.org/wp-content/uploads/2024/04/PBS-News-Hour-Podcast-Tile-360x360-1.jpg",
"officialWebsiteLink": "https://www.pbs.org/newshour/",
"meta": {
"site": "news",
"source": "pbs"
},
"link": "/radio/program/pbs-newshour",
"subscribe": {
"apple": "https://itunes.apple.com/us/podcast/pbs-newshour-full-show/id394432287?mt=2",
"tuneIn": "https://tunein.com/radio/PBS-NewsHour---Full-Show-p425698/",
"rss": "https://www.pbs.org/newshour/feeds/rss/podcasts/show"
}
},
"perspectives": {
"id": "perspectives",
"title": "Perspectives",
"tagline": "KQED's series of daily listener commentaries since 1991",
"info": "KQED's series of daily listener commentaries since 1991.",
"imageSrc": "https://cdn.kqed.org/wp-content/uploads/2025/01/Perspectives_Tile_Final.jpg",
"officialWebsiteLink": "/perspectives/",
"meta": {
"site": "radio",
"source": "kqed",
"order": 15
},
"link": "/perspectives",
"subscribe": {
"apple": "https://podcasts.apple.com/us/podcast/id73801135",
"npr": "https://www.npr.org/podcasts/432309616/perspectives",
"rss": "https://ww2.kqed.org/perspectives/category/perspectives/feed/",
"google": "https://podcasts.google.com/feed/aHR0cHM6Ly93dzIua3FlZC5vcmcvcGVyc3BlY3RpdmVzL2NhdGVnb3J5L3BlcnNwZWN0aXZlcy9mZWVkLw"
}
},
"planet-money": {
"id": "planet-money",
"title": "Planet Money",
"info": "The economy explained. Imagine you could call up a friend and say, Meet me at the bar and tell me what's going on with the economy. Now imagine that's actually a fun evening.",
"airtime": "SUN 3pm-4pm",
"imageSrc": "https://ww2.kqed.org/radio/wp-content/uploads/sites/50/2018/04/planetmoney.jpg",
"officialWebsiteLink": "https://www.npr.org/sections/money/",
"meta": {
"site": "news",
"source": "npr"
},
"link": "/radio/program/planet-money",
"subscribe": {
"npr": "https://rpb3r.app.goo.gl/M4f5",
"apple": "https://itunes.apple.com/us/podcast/planet-money/id290783428?mt=2",
"tuneIn": "https://tunein.com/podcasts/Business--Economics-Podcasts/Planet-Money-p164680/",
"rss": "https://feeds.npr.org/510289/podcast.xml"
}
},
"politicalbreakdown": {
"id": "politicalbreakdown",
"title": "Political Breakdown",
"tagline": "Politics from a personal perspective",
"info": "Political Breakdown is a new series that explores the political intersection of California and the nation. Each week hosts Scott Shafer and Marisa Lagos are joined with a new special guest to unpack politics -- with personality — and offer an insider’s glimpse at how politics happens.",
"airtime": "THU 6:30pm-7pm",
"imageSrc": "https://cdn.kqed.org/wp-content/uploads/2024/04/Political-Breakdown-2024-Podcast-Tile-703x703-1.jpg",
"imageAlt": "KQED Political Breakdown",
"officialWebsiteLink": "/podcasts/politicalbreakdown",
"meta": {
"site": "radio",
"source": "kqed",
"order": 6
},
"link": "/podcasts/politicalbreakdown",
"subscribe": {
"apple": "https://podcasts.apple.com/us/podcast/political-breakdown/id1327641087",
"google": "https://podcasts.google.com/feed/aHR0cHM6Ly9mZWVkcy5tZWdhcGhvbmUuZm0vS1FJTkM5Nzk2MzI2MTEx",
"npr": "https://www.npr.org/podcasts/572155894/political-breakdown",
"stitcher": "https://www.stitcher.com/podcast/kqed/political-breakdown",
"spotify": "https://open.spotify.com/show/07RVyIjIdk2WDuVehvBMoN",
"rss": "https://ww2.kqed.org/news/tag/political-breakdown/feed/podcast"
}
},
"pri-the-world": {
"id": "pri-the-world",
"title": "PRI's The World: Latest Edition",
"info": "Each weekday, host Marco Werman and his team of producers bring you the world's most interesting stories in an hour of radio that reminds us just how small our planet really is.",
"airtime": "MON-FRI 2pm-3pm",
"imageSrc": "https://cdn.kqed.org/wp-content/uploads/2024/04/The-World-Podcast-Tile-360x360-1.jpg",
"officialWebsiteLink": "https://www.pri.org/programs/the-world",
"meta": {
"site": "news",
"source": "PRI"
},
"link": "/radio/program/pri-the-world",
"subscribe": {
"apple": "https://itunes.apple.com/us/podcast/pris-the-world-latest-edition/id278196007?mt=2",
"tuneIn": "https://tunein.com/podcasts/News--Politics-Podcasts/PRIs-The-World-p24/",
"rss": "http://feeds.feedburner.com/pri/theworld"
}
},
"radiolab": {
"id": "radiolab",
"title": "Radiolab",
"info": "A two-time Peabody Award-winner, Radiolab is an investigation told through sounds and stories, and centered around one big idea. In the Radiolab world, information sounds like music and science and culture collide. Hosted by Jad Abumrad and Robert Krulwich, the show is designed for listeners who demand skepticism, but appreciate wonder. WNYC Studios is the producer of other leading podcasts including Freakonomics Radio, Death, Sex & Money, On the Media and many more.",
"airtime": "SUN 12am-1am, SAT 2pm-3pm",
"imageSrc": "https://ww2.kqed.org/radio/wp-content/uploads/sites/50/2018/04/radiolab1400.png",
"officialWebsiteLink": "https://www.wnycstudios.org/shows/radiolab/",
"meta": {
"site": "science",
"source": "WNYC"
},
"link": "/radio/program/radiolab",
"subscribe": {
"apple": "https://itunes.apple.com/us/podcast/radiolab/id152249110?mt=2",
"tuneIn": "https://tunein.com/radio/RadioLab-p68032/",
"rss": "https://feeds.wnyc.org/radiolab"
}
},
"reveal": {
"id": "reveal",
"title": "Reveal",
"info": "Created by The Center for Investigative Reporting and PRX, Reveal is public radios first one-hour weekly radio show and podcast dedicated to investigative reporting. Credible, fact based and without a partisan agenda, Reveal combines the power and artistry of driveway moment storytelling with data-rich reporting on critically important issues. The result is stories that inform and inspire, arming our listeners with information to right injustices, hold the powerful accountable and improve lives.Reveal is hosted by Al Letson and showcases the award-winning work of CIR and newsrooms large and small across the nation. In a radio and podcast market crowded with choices, Reveal focuses on important and often surprising stories that illuminate the world for our listeners.",
"airtime": "SAT 4pm-5pm",
"imageSrc": "https://ww2.kqed.org/radio/wp-content/uploads/sites/50/2018/04/reveal300px.png",
"officialWebsiteLink": "https://www.revealnews.org/episodes/",
"meta": {
"site": "news",
"source": "npr"
},
"link": "/radio/program/reveal",
"subscribe": {
"apple": "https://itunes.apple.com/us/podcast/reveal/id886009669",
"tuneIn": "https://tunein.com/radio/Reveal-p679597/",
"rss": "http://feeds.revealradio.org/revealpodcast"
}
},
"says-you": {
"id": "says-you",
"title": "Says You!",
"info": "Public radio's game show of bluff and bluster, words and whimsy. The warmest, wittiest cocktail party - it's spirited and civil, brainy and boisterous, peppered with musical interludes. Fast paced and playful, it's the most fun you can have with language without getting your mouth washed out with soap. Our motto: It's not important to know the answers, it's important to like the answers!",
"airtime": "SUN 4pm-5pm",
"imageSrc": "https://cdn.kqed.org/wp-content/uploads/2024/04/Says-You-Podcast-Tile-360x360-1.jpg",
"officialWebsiteLink": "http://www.saysyouradio.com/",
"meta": {
"site": "comedy",
"source": "Pipit and Finch"
},
"link": "/radio/program/says-you",
"subscribe": {
"apple": "https://itunes.apple.com/us/podcast/says-you!/id1050199826",
"tuneIn": "https://tunein.com/radio/Says-You-p480/",
"rss": "https://saysyou.libsyn.com/rss"
}
},
"science-friday": {
"id": "science-friday",
"title": "Science Friday",
"info": "Science Friday is a weekly science talk show, broadcast live over public radio stations nationwide. Each week, the show focuses on science topics that are in the news and tries to bring an educated, balanced discussion to bear on the scientific issues at hand. Panels of expert guests join host Ira Flatow, a veteran science journalist, to discuss science and to take questions from listeners during the call-in portion of the program.",
"airtime": "FRI 11am-1pm",
"imageSrc": "https://cdn.kqed.org/wp-content/uploads/2024/04/Science-Friday-Podcast-Tile-360x360-1.jpg",
"officialWebsiteLink": "https://www.wnycstudios.org/shows/science-friday",
"meta": {
"site": "news",
"source": "npr"
},
"link": "/radio/program/science-friday",
"subscribe": {
"apple": "https://itunes.apple.com/WebObjects/MZStore.woa/wa/viewPodcast?s=143441&mt=2&id=73329284&at=11l79Y&ct=nprdirectory",
"tuneIn": "https://tunein.com/radio/Science-Friday-p394/",
"rss": "http://feeds.wnyc.org/science-friday"
}
},
"selected-shorts": {
"id": "selected-shorts",
"title": "Selected Shorts",
"info": "Spellbinding short stories by established and emerging writers take on a new life when they are performed by stars of the stage and screen.",
"airtime": "SAT 8pm-9pm",
"imageSrc": "https://cdn.kqed.org/wp-content/uploads/2024/04/Selected-Shorts-Podcast-Tile-360x360-1.jpg",
"officialWebsiteLink": "https://www.pri.org/programs/selected-shorts",
"meta": {
"site": "arts",
"source": "pri"
},
"link": "/radio/program/selected-shorts",
"subscribe": {
"apple": "https://itunes.apple.com/WebObjects/MZStore.woa/wa/viewPodcast?s=143441&mt=2&id=253191824&at=11l79Y&ct=nprdirectory",
"tuneIn": "https://tunein.com/radio/Selected-Shorts-p31792/",
"rss": "https://feeds.megaphone.fm/selectedshorts"
}
},
"snap-judgment": {
"id": "snap-judgment",
"title": "Snap Judgment",
"tagline": "Real stories with killer beats",
"info": "The Snap Judgment radio show and podcast mixes real stories with killer beats to produce cinematic, dramatic radio. Snap's musical brand of storytelling dares listeners to see the world through the eyes of another. This is storytelling... with a BEAT!! Snap first aired on public radio stations nationwide in July 2010. Today, Snap Judgment airs on over 450 public radio stations and is brought to the airwaves by KQED & PRX.",
"airtime": "SAT 1pm-2pm, 9pm-10pm",
"imageSrc": "https://cdn.kqed.org/wp-content/uploads/2024/05/Snap-Judgment-Podcast-Tile-703x703-1.jpg",
"officialWebsiteLink": "https://snapjudgment.org",
"meta": {
"site": "arts",
"source": "kqed",
"order": 5
},
"link": "https://snapjudgment.org",
"subscribe": {
"apple": "https://podcasts.apple.com/us/podcast/snap-judgment/id283657561",
"npr": "https://www.npr.org/podcasts/449018144/snap-judgment",
"stitcher": "https://www.pandora.com/podcast/snap-judgment/PC:241?source=stitcher-sunset",
"spotify": "https://open.spotify.com/show/3Cct7ZWmxHNAtLgBTqjC5v",
"rss": "https://snap.feed.snapjudgment.org/"
}
},
"soldout": {
"id": "soldout",
"title": "SOLD OUT: Rethinking Housing in America",
"tagline": "A new future for housing",
"info": "Sold Out: Rethinking Housing in America",
"imageSrc": "https://cdn.kqed.org/wp-content/uploads/2024/04/Sold-Out-Podcast-Tile-703x703-1.jpg",
"imageAlt": "KQED Sold Out: Rethinking Housing in America",
"officialWebsiteLink": "/podcasts/soldout",
"meta": {
"site": "news",
"source": "kqed",
"order": 14
},
"link": "/podcasts/soldout",
"subscribe": {
"npr": "https://www.npr.org/podcasts/911586047/s-o-l-d-o-u-t-a-new-future-for-housing",
"apple": "https://podcasts.apple.com/us/podcast/introducing-sold-out-rethinking-housing-in-america/id1531354937",
"rss": "https://feeds.megaphone.fm/soldout",
"spotify": "https://open.spotify.com/show/38dTBSk2ISFoPiyYNoKn1X",
"stitcher": "https://www.stitcher.com/podcast/kqed/sold-out-rethinking-housing-in-america",
"tunein": "https://tunein.com/radio/SOLD-OUT-Rethinking-Housing-in-America-p1365871/",
"google": "https://podcasts.google.com/feed/aHR0cHM6Ly9mZWVkcy5tZWdhcGhvbmUuZm0vc29sZG91dA"
}
},
"spooked": {
"id": "spooked",
"title": "Spooked",
"tagline": "True-life supernatural stories",
"info": "",
"imageSrc": "https://cdn.kqed.org/wp-content/uploads/2024/10/Spooked-Podcast-Tile-703x703-1.jpg",
"imageAlt": "",
"officialWebsiteLink": "https://spookedpodcast.org/",
"meta": {
"site": "news",
"source": "kqed",
"order": 8
},
"link": "https://spookedpodcast.org/",
"subscribe": {
"apple": "https://podcasts.apple.com/us/podcast/spooked/id1279361017",
"npr": "https://www.npr.org/podcasts/549547848/snap-judgment-presents-spooked",
"spotify": "https://open.spotify.com/show/76571Rfl3m7PLJQZKQIGCT",
"rss": "https://feeds.simplecast.com/TBotaapn"
}
},
"ted-radio-hour": {
"id": "ted-radio-hour",
"title": "TED Radio Hour",
"info": "The TED Radio Hour is a journey through fascinating ideas, astonishing inventions, fresh approaches to old problems, and new ways to think and create.",
"airtime": "SUN 3pm-4pm, SAT 10pm-11pm",
"imageSrc": "https://ww2.kqed.org/radio/wp-content/uploads/sites/50/2018/04/tedRadioHour.jpg",
"officialWebsiteLink": "https://www.npr.org/programs/ted-radio-hour/?showDate=2018-06-22",
"meta": {
"site": "news",
"source": "npr"
},
"link": "/radio/program/ted-radio-hour",
"subscribe": {
"npr": "https://rpb3r.app.goo.gl/8vsS",
"apple": "https://itunes.apple.com/WebObjects/MZStore.woa/wa/viewPodcast?s=143441&mt=2&id=523121474&at=11l79Y&ct=nprdirectory",
"tuneIn": "https://tunein.com/radio/TED-Radio-Hour-p418021/",
"rss": "https://feeds.npr.org/510298/podcast.xml"
}
},
"tech-nation": {
"id": "tech-nation",
"title": "Tech Nation Radio Podcast",
"info": "Tech Nation is a weekly public radio program, hosted by Dr. Moira Gunn. Founded in 1993, it has grown from a simple interview show to a multi-faceted production, featuring conversations with noted technology and science leaders, and a weekly science and technology-related commentary.",
"airtime": "FRI 10pm",
"imageSrc": "https://cdn.kqed.org/wp-content/uploads/2024/04/Tech-Nation-Radio-Podcast-Tile-360x360-1.jpg",
"officialWebsiteLink": "http://technation.podomatic.com/",
"meta": {
"site": "science",
"source": "Tech Nation Media"
},
"link": "/radio/program/tech-nation",
"subscribe": {
"rss": "https://technation.podomatic.com/rss2.xml"
}
},
"thebay": {
"id": "thebay",
"title": "The Bay",
"tagline": "Local news to keep you rooted",
"info": "Host Devin Katayama walks you through the biggest story of the day with reporters and newsmakers.",
"imageSrc": "https://cdn.kqed.org/wp-content/uploads/2024/04/The-Bay-Podcast-Tile-703x703-1.jpg",
"imageAlt": "KQED The Bay",
"officialWebsiteLink": "/podcasts/thebay",
"meta": {
"site": "radio",
"source": "kqed",
"order": 3
},
"link": "/podcasts/thebay",
"subscribe": {
"apple": "https://podcasts.apple.com/us/podcast/the-bay/id1350043452",
"google": "https://podcasts.google.com/feed/aHR0cHM6Ly9mZWVkcy5tZWdhcGhvbmUuZm0vS1FJTkM4MjU5Nzg2MzI3",
"npr": "https://www.npr.org/podcasts/586725995/the-bay",
"stitcher": "https://www.stitcher.com/podcast/kqed/the-bay",
"spotify": "https://open.spotify.com/show/4BIKBKIujizLHlIlBNaAqQ",
"rss": "https://feeds.megaphone.fm/KQINC8259786327"
}
},
"californiareport": {
"id": "californiareport",
"title": "The California Report",
"tagline": "California, day by day",
"info": "KQED’s statewide radio news program providing daily coverage of issues, trends and public policy decisions.",
"imageSrc": "https://cdn.kqed.org/wp-content/uploads/2024/04/The-California-Report-Podcast-Tile-703x703-1.jpg",
"imageAlt": "KQED The California Report",
"officialWebsiteLink": "/californiareport",
"meta": {
"site": "news",
"source": "kqed",
"order": 9
},
"link": "/californiareport",
"subscribe": {
"apple": "https://podcasts.apple.com/us/podcast/kqeds-the-california-report/id79681292",
"google": "https://podcasts.google.com/feed/aHR0cHM6Ly9mZWVkcy5tZWdhcGhvbmUuZm0vS1FJTkM1MDAyODE4NTgz",
"npr": "https://www.npr.org/podcasts/432285393/the-california-report",
"stitcher": "https://www.stitcher.com/podcast/kqedfm-kqeds-the-california-report-podcast-8838",
"rss": "https://ww2.kqed.org/news/tag/tcram/feed/podcast"
}
},
"californiareportmagazine": {
"id": "californiareportmagazine",
"title": "The California Report Magazine",
"tagline": "Your state, your stories",
"info": "Every week, The California Report Magazine takes you on a road trip for the ears: to visit the places and meet the people who make California unique. The in-depth storytelling podcast from the California Report.",
"airtime": "FRI 4:30pm-5pm, 6:30pm-7pm, 11pm-11:30pm",
"imageSrc": "https://cdn.kqed.org/wp-content/uploads/2024/04/The-California-Report-Magazine-Podcast-Tile-703x703-1.jpg",
"imageAlt": "KQED The California Report Magazine",
"officialWebsiteLink": "/californiareportmagazine",
"meta": {
"site": "news",
"source": "kqed",
"order": 11
},
"link": "/californiareportmagazine",
"subscribe": {
"apple": "https://podcasts.apple.com/us/podcast/the-california-report-magazine/id1314750545",
"google": "https://podcasts.google.com/feed/aHR0cHM6Ly9mZWVkcy5tZWdhcGhvbmUuZm0vS1FJTkM3NjkwNjk1OTAz",
"npr": "https://www.npr.org/podcasts/564733126/the-california-report-magazine",
"stitcher": "https://www.stitcher.com/podcast/kqed/the-california-report-magazine",
"rss": "https://ww2.kqed.org/news/tag/tcrmag/feed/podcast"
}
},
"closealltabs": {
"id": "closealltabs",
"title": "Close All Tabs",
"tagline": "Your irreverent guide to the trends redefining our world",
"info": "Close All Tabs breaks down how digital culture shapes our world through thoughtful insights and irreverent humor.",
"imageSrc": "https://cdn.kqed.org/wp-content/uploads/2025/02/CAT_2_Tile-scaled.jpg",
"imageAlt": "KQED Close All Tabs",
"officialWebsiteLink": "/podcasts/closealltabs",
"meta": {
"site": "news",
"source": "kqed",
"order": 2
},
"link": "/podcasts/closealltabs",
"subscribe": {
"apple": "https://podcasts.apple.com/us/podcast/close-all-tabs/id214663465",
"rss": "https://feeds.megaphone.fm/KQINC6993880386",
"amazon": "https://music.amazon.com/podcasts/92d9d4ac-67a3-4eed-b10a-fb45d45b1ef2/close-all-tabs",
"spotify": "https://open.spotify.com/show/6LAJFHnGK1pYXYzv6SIol6?si=deb0cae19813417c"
}
},
"thelatest": {
"id": "thelatest",
"title": "The Latest",
"tagline": "Trusted local news in real time",
"info": "",
"imageSrc": "https://cdn.kqed.org/wp-content/uploads/2025/05/The-Latest-2025-Podcast-Tile-703x703-1.jpg",
"imageAlt": "KQED The Latest",
"officialWebsiteLink": "/thelatest",
"meta": {
"site": "news",
"source": "kqed",
"order": 7
},
"link": "/thelatest",
"subscribe": {
"apple": "https://podcasts.apple.com/us/podcast/the-latest-from-kqed/id1197721799",
"npr": "https://www.npr.org/podcasts/1257949365/the-latest-from-k-q-e-d",
"spotify": "https://open.spotify.com/show/5KIIXMgM9GTi5AepwOYvIZ?si=bd3053fec7244dba",
"rss": "https://feeds.megaphone.fm/KQINC9137121918"
}
},
"theleap": {
"id": "theleap",
"title": "The Leap",
"tagline": "What if you closed your eyes, and jumped?",
"info": "Stories about people making dramatic, risky changes, told by award-winning public radio reporter Judy Campbell.",
"imageSrc": "https://cdn.kqed.org/wp-content/uploads/2024/04/The-Leap-Podcast-Tile-703x703-1.jpg",
"imageAlt": "KQED The Leap",
"officialWebsiteLink": "/podcasts/theleap",
"meta": {
"site": "news",
"source": "kqed",
"order": 17
},
"link": "/podcasts/theleap",
"subscribe": {
"apple": "https://podcasts.apple.com/us/podcast/the-leap/id1046668171",
"google": "https://podcasts.google.com/feed/aHR0cHM6Ly9mZWVkcy5tZWdhcGhvbmUuZm0vS1FJTkM0NTcwODQ2MjY2",
"npr": "https://www.npr.org/podcasts/447248267/the-leap",
"stitcher": "https://www.stitcher.com/podcast/kqed/the-leap",
"spotify": "https://open.spotify.com/show/3sSlVHHzU0ytLwuGs1SD1U",
"rss": "https://ww2.kqed.org/news/programs/the-leap/feed/podcast"
}
},
"masters-of-scale": {
"id": "masters-of-scale",
"title": "Masters of Scale",
"info": "Masters of Scale is an original podcast in which LinkedIn co-founder and Greylock Partner Reid Hoffman sets out to describe and prove theories that explain how great entrepreneurs take their companies from zero to a gazillion in ingenious fashion.",
"airtime": "Every other Wednesday June 12 through October 16 at 8pm (repeats Thursdays at 2am)",
"imageSrc": "https://cdn.kqed.org/wp-content/uploads/2024/04/Masters-of-Scale-Podcast-Tile-360x360-1.jpg",
"officialWebsiteLink": "https://mastersofscale.com/",
"meta": {
"site": "radio",
"source": "WaitWhat"
},
"link": "/radio/program/masters-of-scale",
"subscribe": {
"apple": "http://mastersofscale.app.link/",
"rss": "https://rss.art19.com/masters-of-scale"
}
},
"the-moth-radio-hour": {
"id": "the-moth-radio-hour",
"title": "The Moth Radio Hour",
"info": "Since its launch in 1997, The Moth has presented thousands of true stories, told live and without notes, to standing-room-only crowds worldwide. Moth storytellers stand alone, under a spotlight, with only a microphone and a roomful of strangers. The storyteller and the audience embark on a high-wire act of shared experience which is both terrifying and exhilarating. Since 2008, The Moth podcast has featured many of our favorite stories told live on Moth stages around the country. For information on all of our programs and live events, visit themoth.org.",
"airtime": "SAT 8pm-9pm and SUN 11am-12pm",
"imageSrc": "https://ww2.kqed.org/radio/wp-content/uploads/sites/50/2018/04/theMoth.jpg",
"officialWebsiteLink": "https://themoth.org/",
"meta": {
"site": "arts",
"source": "prx"
},
"link": "/radio/program/the-moth-radio-hour",
"subscribe": {
"apple": "https://itunes.apple.com/us/podcast/the-moth-podcast/id275699983?mt=2",
"tuneIn": "https://tunein.com/radio/The-Moth-p273888/",
"rss": "http://feeds.themoth.org/themothpodcast"
}
},
"the-new-yorker-radio-hour": {
"id": "the-new-yorker-radio-hour",
"title": "The New Yorker Radio Hour",
"info": "The New Yorker Radio Hour is a weekly program presented by the magazine's editor, David Remnick, and produced by WNYC Studios and The New Yorker. Each episode features a diverse mix of interviews, profiles, storytelling, and an occasional burst of humor inspired by the magazine, and shaped by its writers, artists, and editors. This isn't a radio version of a magazine, but something all its own, reflecting the rich possibilities of audio storytelling and conversation. Theme music for the show was composed and performed by Merrill Garbus of tUnE-YArDs.",
"airtime": "SAT 10am-11am",
"imageSrc": "https://cdn.kqed.org/wp-content/uploads/2024/04/The-New-Yorker-Podcast-Tile-360x360-1.jpg",
"officialWebsiteLink": "https://www.wnycstudios.org/shows/tnyradiohour",
"meta": {
"site": "arts",
"source": "WNYC"
},
"link": "/radio/program/the-new-yorker-radio-hour",
"subscribe": {
"apple": "https://itunes.apple.com/us/podcast/id1050430296",
"tuneIn": "https://tunein.com/podcasts/WNYC-Podcasts/New-Yorker-Radio-Hour-p803804/",
"rss": "https://feeds.feedburner.com/newyorkerradiohour"
}
},
"the-takeaway": {
"id": "the-takeaway",
"title": "The Takeaway",
"info": "The Takeaway is produced in partnership with its national audience. It delivers perspective and analysis to help us better understand the day’s news. Be a part of the American conversation on-air and online.",
"airtime": "MON-THU 12pm-1pm",
"imageSrc": "https://cdn.kqed.org/wp-content/uploads/2024/04/The-Takeaway-Podcast-Tile-360x360-1.jpg",
"officialWebsiteLink": "https://www.wnycstudios.org/shows/takeaway",
"meta": {
"site": "news",
"source": "WNYC"
},
"link": "/radio/program/the-takeaway",
"subscribe": {
"apple": "https://itunes.apple.com/us/podcast/the-takeaway/id363143310?mt=2",
"tuneIn": "http://tunein.com/radio/The-Takeaway-p150731/",
"rss": "https://feeds.feedburner.com/takeawaypodcast"
}
},
"this-american-life": {
"id": "this-american-life",
"title": "This American Life",
"info": "This American Life is a weekly public radio show, heard by 2.2 million people on more than 500 stations. Another 2.5 million people download the weekly podcast. It is hosted by Ira Glass, produced in collaboration with Chicago Public Media, delivered to stations by PRX The Public Radio Exchange, and has won all of the major broadcasting awards.",
"airtime": "SAT 12pm-1pm, 7pm-8pm",
"imageSrc": "https://ww2.kqed.org/radio/wp-content/uploads/sites/50/2018/04/thisAmericanLife.png",
"officialWebsiteLink": "https://www.thisamericanlife.org/",
"meta": {
"site": "news",
"source": "wbez"
},
"link": "/radio/program/this-american-life",
"subscribe": {
"apple": "https://itunes.apple.com/WebObjects/MZStore.woa/wa/viewPodcast?s=143441&mt=2&id=201671138&at=11l79Y&ct=nprdirectory",
"rss": "https://www.thisamericanlife.org/podcast/rss.xml"
}
},
"truthbetold": {
"id": "truthbetold",
"title": "Truth Be Told",
"tagline": "Advice by and for people of color",
"info": "We’re the friend you call after a long day, the one who gets it. Through wisdom from some of the greatest thinkers of our time, host Tonya Mosley explores what it means to grow and thrive as a Black person in America, while discovering new ways of being that serve as a portal to more love, more healing, and more joy.",
"airtime": "",
"imageSrc": "https://cdn.kqed.org/wp-content/uploads/2024/04/Truth-Be-Told-Podcast-Tile-360x360-1.jpg",
"imageAlt": "KQED Truth Be Told with Tonya Mosley",
"officialWebsiteLink": "https://www.kqed.org/podcasts/truthbetold",
"meta": {
"site": "news",
"source": "npr"
},
"link": "/podcasts/truthbetold",
"subscribe": {
"apple": "https://podcasts.apple.com/us/podcast/truth-be-told/id1462216572",
"google": "https://podcasts.google.com/feed/aHR0cHM6Ly93dzIua3FlZC5vcmcvbmV3cy9jYXRlZ29yeS90cnV0aC1iZS10b2xkLXBvZGNhc3QvZmVlZA",
"npr": "https://www.npr.org/podcasts/719210818/truth-be-told",
"stitcher": "https://www.stitcher.com/s?fid=398170&refid=stpr",
"spotify": "https://open.spotify.com/show/587DhwTBxke6uvfwDfaV5N"
}
},
"wait-wait-dont-tell-me": {
"id": "wait-wait-dont-tell-me",
"title": "Wait Wait... Don't Tell Me!",
"info": "Peter Sagal and Bill Kurtis host the weekly NPR News quiz show alongside some of the best and brightest news and entertainment personalities.",
"airtime": "SUN 10am-11am, SAT 11am-12pm, SAT 6pm-7pm",
"imageSrc": "https://cdn.kqed.org/wp-content/uploads/2024/04/Wait-Wait-Podcast-Tile-300x300-1.jpg",
"officialWebsiteLink": "https://www.npr.org/programs/wait-wait-dont-tell-me/",
"meta": {
"site": "news",
"source": "npr"
},
"link": "/radio/program/wait-wait-dont-tell-me",
"subscribe": {
"npr": "https://rpb3r.app.goo.gl/Xogv",
"apple": "https://itunes.apple.com/WebObjects/MZStore.woa/wa/viewPodcast?s=143441&mt=2&id=121493804&at=11l79Y&ct=nprdirectory",
"tuneIn": "https://tunein.com/radio/Wait-Wait-Dont-Tell-Me-p46/",
"rss": "https://feeds.npr.org/344098539/podcast.xml"
}
},
"washington-week": {
"id": "washington-week",
"title": "Washington Week",
"info": "For 50 years, Washington Week has been the most intelligent and up to date conversation about the most important news stories of the week. Washington Week is the longest-running news and public affairs program on PBS and features journalists -- not pundits -- lending insight and perspective to the week's important news stories.",
"airtime": "SAT 1:30am-2am",
"imageSrc": "https://ww2.kqed.org/radio/wp-content/uploads/sites/50/2018/04/washington-week.jpg",
"officialWebsiteLink": "http://www.pbs.org/weta/washingtonweek/",
"meta": {
"site": "news",
"source": "pbs"
},
"link": "/radio/program/washington-week",
"subscribe": {
"apple": "https://itunes.apple.com/us/podcast/washington-week-audio-pbs/id83324702?mt=2",
"tuneIn": "https://tunein.com/podcasts/Current-Affairs/Washington-Week-p693/",
"rss": "http://feeds.pbs.org/pbs/weta/washingtonweek-audio"
}
},
"weekend-edition-saturday": {
"id": "weekend-edition-saturday",
"title": "Weekend Edition Saturday",
"info": "Weekend Edition Saturday wraps up the week's news and offers a mix of analysis and features on a wide range of topics, including arts, sports, entertainment, and human interest stories. The two-hour program is hosted by NPR's Peabody Award-winning Scott Simon.",
"airtime": "SAT 5am-10am",
"imageSrc": "https://cdn.kqed.org/wp-content/uploads/2024/04/Weekend-Edition-Podcast-Tile-360x360-1.jpg",
"officialWebsiteLink": "https://www.npr.org/programs/weekend-edition-saturday/",
"meta": {
"site": "news",
"source": "npr"
},
"link": "/radio/program/weekend-edition-saturday"
},
"weekend-edition-sunday": {
"id": "weekend-edition-sunday",
"title": "Weekend Edition Sunday",
"info": "Weekend Edition Sunday features interviews with newsmakers, artists, scientists, politicians, musicians, writers, theologians and historians. The program has covered news events from Nelson Mandela's 1990 release from a South African prison to the capture of Saddam Hussein.",
"airtime": "SUN 5am-10am",
"imageSrc": "https://cdn.kqed.org/wp-content/uploads/2024/04/Weekend-Edition-Podcast-Tile-360x360-1.jpg",
"officialWebsiteLink": "https://www.npr.org/programs/weekend-edition-sunday/",
"meta": {
"site": "news",
"source": "npr"
},
"link": "/radio/program/weekend-edition-sunday"
},
"world-affairs": {
"id": "world-affairs",
"title": "World Affairs",
"info": "The world as we knew it is undergoing a rapid transformation…so what's next? Welcome to WorldAffairs, your guide to a changing world. We give you the context you need to navigate across borders and ideologies. Through sound-rich stories and in-depth interviews, we break down what it means to be a global citizen on a hot, crowded planet. Our hosts, Ray Suarez, Teresa Cotsirilos and Philip Yun help you make sense of an uncertain world, one story at a time.",
"airtime": "MON 10pm, TUE 1am, SAT 3am",
"imageSrc": "https://cdn.kqed.org/wp-content/uploads/2024/04/World-Affairs-Podcast-Tile-360x360-1.jpg",
"officialWebsiteLink": "https://www.worldaffairs.org/",
"meta": {
"site": "news",
"source": "World Affairs"
},
"link": "/radio/program/world-affairs",
"subscribe": {
"apple": "https://itunes.apple.com/us/podcast/world-affairs/id101215657?mt=2",
"tuneIn": "https://tunein.com/radio/WorldAffairs-p1665/",
"rss": "https://worldaffairs.libsyn.com/rss"
}
},
"on-shifting-ground": {
"id": "on-shifting-ground",
"title": "On Shifting Ground with Ray Suarez",
"info": "Geopolitical turmoil. A warming planet. Authoritarians on the rise. We live in a chaotic world that’s rapidly shifting around us. “On Shifting Ground with Ray Suarez” explores international fault lines and how they impact us all. Each week, NPR veteran Ray Suarez hosts conversations with journalists, leaders and policy experts to help us read between the headlines – and give us hope for human resilience.",
"airtime": "MON 10pm, TUE 1am, SAT 3am",
"imageSrc": "https://ww2.kqed.org/app/uploads/2022/12/onshiftingground-600x600-1.png",
"officialWebsiteLink": "https://worldaffairs.org/radio-podcast/",
"meta": {
"site": "news",
"source": "On Shifting Ground"
},
"link": "/radio/program/on-shifting-ground",
"subscribe": {
"apple": "https://podcasts.apple.com/ie/podcast/on-shifting-ground/id101215657",
"rss": "https://feeds.libsyn.com/36668/rss"
}
},
"hidden-brain": {
"id": "hidden-brain",
"title": "Hidden Brain",
"info": "Shankar Vedantam uses science and storytelling to reveal the unconscious patterns that drive human behavior, shape our choices and direct our relationships.",
"imageSrc": "https://ww2.kqed.org/radio/wp-content/uploads/sites/50/2018/05/hiddenbrain.jpg",
"officialWebsiteLink": "https://www.npr.org/series/423302056/hidden-brain",
"airtime": "SUN 7pm-8pm",
"meta": {
"site": "news",
"source": "NPR"
},
"link": "/radio/program/hidden-brain",
"subscribe": {
"apple": "https://itunes.apple.com/us/podcast/hidden-brain/id1028908750?mt=2",
"tuneIn": "https://tunein.com/podcasts/Science-Podcasts/Hidden-Brain-p787503/",
"rss": "https://feeds.npr.org/510308/podcast.xml"
}
},
"hyphenacion": {
"id": "hyphenacion",
"title": "Hyphenación",
"tagline": "Where conversation and cultura meet",
"info": "What kind of no sabo word is Hyphenación? For us, it’s about living within a hyphenation. Like being a third-gen Mexican-American from the Texas border now living that Bay Area Chicano life. Like Xorje! Each week we bring together a couple of hyphenated Latinos to talk all about personal life choices: family, careers, relationships, belonging … everything is on the table. ",
"imageSrc": "https://cdn.kqed.org/wp-content/uploads/2025/03/Hyphenacion_FinalAssets_PodcastTile.png",
"imageAlt": "KQED Hyphenación",
"officialWebsiteLink": "/podcasts/hyphenacion",
"meta": {
"site": "news",
"source": "kqed",
"order": 1
},
"link": "/podcasts/hyphenacion",
"subscribe": {
"apple": "https://podcasts.apple.com/us/podcast/hyphenaci%C3%B3n/id1191591838",
"spotify": "https://open.spotify.com/show/2p3Fifq96nw9BPcmFdIq0o?si=39209f7b25774f38",
"youtube": "https://www.youtube.com/c/kqedarts",
"amazon": "https://music.amazon.com/podcasts/6c3dd23c-93fb-4aab-97ba-1725fa6315f1/hyphenaci%C3%B3n",
"rss": "https://feeds.megaphone.fm/KQINC2275451163"
}
},
"city-arts": {
"id": "city-arts",
"title": "City Arts & Lectures",
"info": "A one-hour radio program to hear celebrated writers, artists and thinkers address contemporary ideas and values, often discussing the creative process. Please note: tapes or transcripts are not available",
"imageSrc": "https://ww2.kqed.org/radio/wp-content/uploads/sites/50/2018/05/cityartsandlecture-300x300.jpg",
"officialWebsiteLink": "https://www.cityarts.net/",
"airtime": "SUN 1pm-2pm, TUE 10pm, WED 1am",
"meta": {
"site": "news",
"source": "City Arts & Lectures"
},
"link": "https://www.cityarts.net",
"subscribe": {
"tuneIn": "https://tunein.com/radio/City-Arts-and-Lectures-p692/",
"rss": "https://www.cityarts.net/feed/"
}
},
"white-lies": {
"id": "white-lies",
"title": "White Lies",
"info": "In 1965, Rev. James Reeb was murdered in Selma, Alabama. Three men were tried and acquitted, but no one was ever held to account. Fifty years later, two journalists from Alabama return to the city where it happened, expose the lies that kept the murder from being solved and uncover a story about guilt and memory that says as much about America today as it does about the past.",
"imageSrc": "https://cdn.kqed.org/wp-content/uploads/2024/04/White-Lies-Podcast-Tile-360x360-1.jpg",
"officialWebsiteLink": "https://www.npr.org/podcasts/510343/white-lies",
"meta": {
"site": "news",
"source": "npr"
},
"link": "/radio/program/white-lies",
"subscribe": {
"npr": "https://rpb3r.app.goo.gl/whitelies",
"apple": "https://podcasts.apple.com/podcast/id1462650519?mt=2&at=11l79Y&ct=nprdirectory",
"google": "https://podcasts.google.com/feed/aHR0cHM6Ly9mZWVkcy5ucHIub3JnLzUxMDM0My9wb2RjYXN0LnhtbA",
"spotify": "https://open.spotify.com/show/12yZ2j8vxqhc0QZyRES3ft?si=LfWYEK6URA63hueKVxRLAw",
"rss": "https://feeds.npr.org/510343/podcast.xml"
}
},
"rightnowish": {
"id": "rightnowish",
"title": "Rightnowish",
"tagline": "Art is where you find it",
"info": "Rightnowish digs into life in the Bay Area right now… ish. Journalist Pendarvis Harshaw takes us to galleries painted on the sides of liquor stores in West Oakland. We'll dance in warehouses in the Bayview, make smoothies with kids in South Berkeley, and listen to classical music in a 1984 Cutlass Supreme in Richmond. Every week, Pen talks to movers and shakers about how the Bay Area shapes what they create, and how they shape the place we call home.",
"imageSrc": "https://cdn.kqed.org/wp-content/uploads/2024/04/Rightnowish-Podcast-Tile-500x500-1.jpg",
"imageAlt": "KQED Rightnowish with Pendarvis Harshaw",
"officialWebsiteLink": "/podcasts/rightnowish",
"meta": {
"site": "arts",
"source": "kqed",
"order": 16
},
"link": "/podcasts/rightnowish",
"subscribe": {
"npr": "https://www.npr.org/podcasts/721590300/rightnowish",
"rss": "https://ww2.kqed.org/arts/programs/rightnowish/feed/podcast",
"apple": "https://podcasts.apple.com/us/podcast/rightnowish/id1482187648",
"stitcher": "https://www.stitcher.com/podcast/kqed/rightnowish",
"google": "https://podcasts.google.com/feed/aHR0cHM6Ly9mZWVkcy5tZWdhcGhvbmUuZm0vS1FJTkMxMjU5MTY3NDc4",
"spotify": "https://open.spotify.com/show/7kEJuafTzTVan7B78ttz1I"
}
},
"jerrybrown": {
"id": "jerrybrown",
"title": "The Political Mind of Jerry Brown",
"tagline": "Lessons from a lifetime in politics",
"info": "The Political Mind of Jerry Brown brings listeners the wisdom of the former Governor, Mayor, and presidential candidate. Scott Shafer interviewed Brown for more than 40 hours, covering the former governor's life and half-century in the political game and Brown has some lessons he'd like to share. ",
"imageSrc": "https://cdn.kqed.org/wp-content/uploads/2024/04/The-Political-Mind-of-Jerry-Brown-Podcast-Tile-703x703-1.jpg",
"imageAlt": "KQED The Political Mind of Jerry Brown",
"officialWebsiteLink": "/podcasts/jerrybrown",
"meta": {
"site": "news",
"source": "kqed",
"order": 18
},
"link": "/podcasts/jerrybrown",
"subscribe": {
"npr": "https://www.npr.org/podcasts/790253322/the-political-mind-of-jerry-brown",
"apple": "https://itunes.apple.com/us/podcast/id1492194549",
"rss": "https://ww2.kqed.org/news/series/jerrybrown/feed/podcast/",
"tuneIn": "http://tun.in/pjGcK",
"stitcher": "https://www.stitcher.com/podcast/kqed/the-political-mind-of-jerry-brown",
"spotify": "https://open.spotify.com/show/54C1dmuyFyKMFttY6X2j6r?si=K8SgRCoISNK6ZbjpXrX5-w",
"google": "https://podcasts.google.com/feed/aHR0cHM6Ly93dzIua3FlZC5vcmcvbmV3cy9zZXJpZXMvamVycnlicm93bi9mZWVkL3BvZGNhc3Qv"
}
},
"tinydeskradio": {
"id": "tinydeskradio",
"title": "Tiny Desk Radio",
"info": "We're bringing the best of Tiny Desk to the airwaves, only on public radio.",
"airtime": "SUN 8pm and SAT 9pm",
"imageSrc": "https://cdn.kqed.org/wp-content/uploads/2025/04/300x300-For-Member-Station-Logo-Tiny-Desk-Radio-@2x.png",
"officialWebsiteLink": "https://www.npr.org/series/g-s1-52030/tiny-desk-radio",
"meta": {
"site": "news",
"source": "npr"
},
"link": "/radio/program/tinydeskradio",
"subscribe": {
"rss": "https://feeds.npr.org/g-s1-52030/rss.xml"
}
},
"the-splendid-table": {
"id": "the-splendid-table",
"title": "The Splendid Table",
"info": "\u003cem>The Splendid Table\u003c/em> hosts our nation's conversations about cooking, sustainability and food culture.",
"imageSrc": "https://cdn.kqed.org/wp-content/uploads/2024/04/The-Splendid-Table-Podcast-Tile-360x360-1.jpg",
"officialWebsiteLink": "https://www.splendidtable.org/",
"airtime": "SUN 10-11 pm",
"meta": {
"site": "radio",
"source": "npr"
},
"link": "/radio/program/the-splendid-table"
}
},
"racesReducer": {},
"racesGenElectionReducer": {},
"radioSchedulesReducer": {},
"listsReducer": {
"posts/news?tag=openai": {
"isFetching": false,
"latestQuery": {
"from": 0,
"postsToRender": 9
},
"tag": null,
"vitalsOnly": true,
"totalRequested": 9,
"isLoading": false,
"isLoadingMore": true,
"total": {
"value": 20,
"relation": "eq"
},
"items": [
"news_12063401",
"news_12060365",
"news_12054490",
"news_12038874",
"news_12038154",
"news_12038029",
"news_12037518",
"news_12034916",
"news_12034490"
]
}
},
"recallGuideReducer": {
"intros": {},
"policy": {},
"candidates": {}
},
"savedArticleReducer": {
"articles": [],
"status": {}
},
"pfsSessionReducer": {},
"subscriptionsReducer": {},
"termsReducer": {
"about": {
"name": "About",
"type": "terms",
"id": "about",
"slug": "about",
"link": "/about",
"taxonomy": "site"
},
"arts": {
"name": "Arts & Culture",
"grouping": [
"arts",
"pop",
"trulyca"
],
"description": "KQED Arts provides daily in-depth coverage of the Bay Area's music, art, film, performing arts, literature and arts news, as well as cultural commentary and criticism.",
"type": "terms",
"id": "arts",
"slug": "arts",
"link": "/arts",
"taxonomy": "site"
},
"artschool": {
"name": "Art School",
"parent": "arts",
"type": "terms",
"id": "artschool",
"slug": "artschool",
"link": "/artschool",
"taxonomy": "site"
},
"bayareabites": {
"name": "KQED food",
"grouping": [
"food",
"bayareabites",
"checkplease"
],
"parent": "food",
"type": "terms",
"id": "bayareabites",
"slug": "bayareabites",
"link": "/food",
"taxonomy": "site"
},
"bayareahiphop": {
"name": "Bay Area Hiphop",
"type": "terms",
"id": "bayareahiphop",
"slug": "bayareahiphop",
"link": "/bayareahiphop",
"taxonomy": "site"
},
"campaign21": {
"name": "Campaign 21",
"type": "terms",
"id": "campaign21",
"slug": "campaign21",
"link": "/campaign21",
"taxonomy": "site"
},
"checkplease": {
"name": "KQED food",
"grouping": [
"food",
"bayareabites",
"checkplease"
],
"parent": "food",
"type": "terms",
"id": "checkplease",
"slug": "checkplease",
"link": "/food",
"taxonomy": "site"
},
"education": {
"name": "Education",
"grouping": [
"education"
],
"type": "terms",
"id": "education",
"slug": "education",
"link": "/education",
"taxonomy": "site"
},
"elections": {
"name": "Elections",
"type": "terms",
"id": "elections",
"slug": "elections",
"link": "/elections",
"taxonomy": "site"
},
"events": {
"name": "Events",
"type": "terms",
"id": "events",
"slug": "events",
"link": "/events",
"taxonomy": "site"
},
"event": {
"name": "Event",
"alias": "events",
"type": "terms",
"id": "event",
"slug": "event",
"link": "/event",
"taxonomy": "site"
},
"filmschoolshorts": {
"name": "Film School Shorts",
"type": "terms",
"id": "filmschoolshorts",
"slug": "filmschoolshorts",
"link": "/filmschoolshorts",
"taxonomy": "site"
},
"food": {
"name": "KQED food",
"grouping": [
"food",
"bayareabites",
"checkplease"
],
"type": "terms",
"id": "food",
"slug": "food",
"link": "/food",
"taxonomy": "site"
},
"forum": {
"name": "Forum",
"relatedContentQuery": "posts/forum?",
"parent": "news",
"type": "terms",
"id": "forum",
"slug": "forum",
"link": "/forum",
"taxonomy": "site"
},
"futureofyou": {
"name": "Future of You",
"grouping": [
"science",
"futureofyou"
],
"parent": "science",
"type": "terms",
"id": "futureofyou",
"slug": "futureofyou",
"link": "/futureofyou",
"taxonomy": "site"
},
"jpepinheart": {
"name": "KQED food",
"relatedContentQuery": "posts/food,bayareabites,checkplease",
"parent": "food",
"type": "terms",
"id": "jpepinheart",
"slug": "jpepinheart",
"link": "/food",
"taxonomy": "site"
},
"liveblog": {
"name": "Live Blog",
"type": "terms",
"id": "liveblog",
"slug": "liveblog",
"link": "/liveblog",
"taxonomy": "site"
},
"livetv": {
"name": "Live TV",
"parent": "tv",
"type": "terms",
"id": "livetv",
"slug": "livetv",
"link": "/livetv",
"taxonomy": "site"
},
"lowdown": {
"name": "The Lowdown",
"relatedContentQuery": "posts/lowdown?",
"parent": "news",
"type": "terms",
"id": "lowdown",
"slug": "lowdown",
"link": "/lowdown",
"taxonomy": "site"
},
"mindshift": {
"name": "Mindshift",
"parent": "news",
"description": "MindShift explores the future of education by highlighting the innovative – and sometimes counterintuitive – ways educators and parents are helping all children succeed.",
"type": "terms",
"id": "mindshift",
"slug": "mindshift",
"link": "/mindshift",
"taxonomy": "site"
},
"news": {
"name": "News",
"grouping": [
"news",
"forum"
],
"type": "terms",
"id": "news",
"slug": "news",
"link": "/news",
"taxonomy": "site"
},
"perspectives": {
"name": "Perspectives",
"parent": "radio",
"type": "terms",
"id": "perspectives",
"slug": "perspectives",
"link": "/perspectives",
"taxonomy": "site"
},
"podcasts": {
"name": "Podcasts",
"type": "terms",
"id": "podcasts",
"slug": "podcasts",
"link": "/podcasts",
"taxonomy": "site"
},
"pop": {
"name": "Pop",
"parent": "arts",
"type": "terms",
"id": "pop",
"slug": "pop",
"link": "/pop",
"taxonomy": "site"
},
"pressroom": {
"name": "Pressroom",
"type": "terms",
"id": "pressroom",
"slug": "pressroom",
"link": "/pressroom",
"taxonomy": "site"
},
"quest": {
"name": "Quest",
"parent": "science",
"type": "terms",
"id": "quest",
"slug": "quest",
"link": "/quest",
"taxonomy": "site"
},
"radio": {
"name": "Radio",
"grouping": [
"forum",
"perspectives"
],
"description": "Listen to KQED Public Radio – home of Forum and The California Report – on 88.5 FM in San Francisco, 89.3 FM in Sacramento, 88.3 FM in Santa Rosa and 88.1 FM in Martinez.",
"type": "terms",
"id": "radio",
"slug": "radio",
"link": "/radio",
"taxonomy": "site"
},
"root": {
"name": "KQED",
"image": "https://ww2.kqed.org/app/uploads/2020/02/KQED-OG-Image@1x.png",
"imageWidth": 1200,
"imageHeight": 630,
"headData": {
"title": "KQED | News, Radio, Podcasts, TV | Public Media for Northern California",
"description": "KQED provides public radio, television, and independent reporting on issues that matter to the Bay Area. We’re the NPR and PBS member station for Northern California."
},
"type": "terms",
"id": "root",
"slug": "root",
"link": "/root",
"taxonomy": "site"
},
"science": {
"name": "Science",
"grouping": [
"science",
"futureofyou"
],
"description": "KQED Science brings you award-winning science and environment coverage from the Bay Area and beyond.",
"type": "terms",
"id": "science",
"slug": "science",
"link": "/science",
"taxonomy": "site"
},
"stateofhealth": {
"name": "State of Health",
"parent": "science",
"type": "terms",
"id": "stateofhealth",
"slug": "stateofhealth",
"link": "/stateofhealth",
"taxonomy": "site"
},
"support": {
"name": "Support",
"type": "terms",
"id": "support",
"slug": "support",
"link": "/support",
"taxonomy": "site"
},
"thedolist": {
"name": "The Do List",
"parent": "arts",
"type": "terms",
"id": "thedolist",
"slug": "thedolist",
"link": "/thedolist",
"taxonomy": "site"
},
"trulyca": {
"name": "Truly CA",
"grouping": [
"arts",
"pop",
"trulyca"
],
"parent": "arts",
"type": "terms",
"id": "trulyca",
"slug": "trulyca",
"link": "/trulyca",
"taxonomy": "site"
},
"tv": {
"name": "TV",
"type": "terms",
"id": "tv",
"slug": "tv",
"link": "/tv",
"taxonomy": "site"
},
"voterguide": {
"name": "Voter Guide",
"parent": "elections",
"alias": "elections",
"type": "terms",
"id": "voterguide",
"slug": "voterguide",
"link": "/voterguide",
"taxonomy": "site"
},
"guiaelectoral": {
"name": "Guia Electoral",
"parent": "elections",
"alias": "elections",
"type": "terms",
"id": "guiaelectoral",
"slug": "guiaelectoral",
"link": "/guiaelectoral",
"taxonomy": "site"
},
"news_33542": {
"type": "terms",
"id": "news_33542",
"meta": {
"index": "terms_1716263798",
"site": "news",
"id": "33542",
"found": true
},
"relationships": {},
"featImg": null,
"name": "OpenAI",
"description": null,
"taxonomy": "tag",
"headData": {
"twImgId": null,
"twTitle": null,
"ogTitle": null,
"ogImgId": null,
"twDescription": null,
"description": null,
"title": "OpenAI Archives | KQED News",
"ogDescription": null,
"imageData": {
"ogImageSize": {
"file": "https://cdn.kqed.org/wp-content/uploads/2020/02/KQED-OG-Image@1x.png",
"width": 1200,
"height": 630
},
"twImageSize": {
"file": "https://cdn.kqed.org/wp-content/uploads/2020/02/KQED-OG-Image@1x.png"
},
"twitterCard": "summary_large_image"
}
},
"ttid": 33559,
"slug": "openai",
"isLoading": false,
"link": "/news/tag/openai"
},
"news_31795": {
"type": "terms",
"id": "news_31795",
"meta": {
"index": "terms_1716263798",
"site": "news",
"id": "31795",
"found": true
},
"relationships": {},
"featImg": null,
"name": "California",
"description": null,
"taxonomy": "category",
"headData": {
"twImgId": null,
"twTitle": null,
"ogTitle": null,
"ogImgId": null,
"twDescription": null,
"description": null,
"title": "California Archives | KQED News",
"ogDescription": null
},
"ttid": 31812,
"slug": "california",
"isLoading": false,
"link": "/news/category/california"
},
"news_6188": {
"type": "terms",
"id": "news_6188",
"meta": {
"index": "terms_1716263798",
"site": "news",
"id": "6188",
"found": true
},
"relationships": {},
"featImg": null,
"name": "Law and Justice",
"description": null,
"taxonomy": "category",
"headData": {
"twImgId": null,
"twTitle": null,
"ogTitle": null,
"ogImgId": null,
"twDescription": null,
"description": null,
"title": "Law and Justice Archives | KQED News",
"ogDescription": null
},
"ttid": 6212,
"slug": "law-and-justice",
"isLoading": false,
"link": "/news/category/law-and-justice"
},
"news_8": {
"type": "terms",
"id": "news_8",
"meta": {
"index": "terms_1716263798",
"site": "news",
"id": "8",
"found": true
},
"relationships": {},
"featImg": null,
"name": "News",
"description": null,
"taxonomy": "category",
"headData": {
"twImgId": null,
"twTitle": null,
"ogTitle": null,
"ogImgId": null,
"twDescription": null,
"description": null,
"title": "News Archives | KQED News",
"ogDescription": null
},
"ttid": 8,
"slug": "news",
"isLoading": false,
"link": "/news/category/news"
},
"news_248": {
"type": "terms",
"id": "news_248",
"meta": {
"index": "terms_1716263798",
"site": "news",
"id": "248",
"found": true
},
"relationships": {},
"featImg": null,
"name": "Technology",
"description": null,
"taxonomy": "category",
"headData": {
"twImgId": null,
"twTitle": null,
"ogTitle": null,
"ogImgId": null,
"twDescription": null,
"description": null,
"title": "Technology Archives | KQED News",
"ogDescription": null
},
"ttid": 256,
"slug": "technology",
"isLoading": false,
"link": "/news/category/technology"
},
"news_18538": {
"type": "terms",
"id": "news_18538",
"meta": {
"index": "terms_1716263798",
"site": "news",
"id": "18538",
"found": true
},
"relationships": {},
"featImg": null,
"name": "California",
"description": null,
"taxonomy": "tag",
"headData": {
"twImgId": null,
"twTitle": null,
"ogTitle": null,
"ogImgId": null,
"twDescription": null,
"description": null,
"title": "California Archives | KQED News",
"ogDescription": null
},
"ttid": 31,
"slug": "california",
"isLoading": false,
"link": "/news/tag/california"
},
"news_32668": {
"type": "terms",
"id": "news_32668",
"meta": {
"index": "terms_1716263798",
"site": "news",
"id": "32668",
"found": true
},
"relationships": {},
"featImg": null,
"name": "ChatGPT",
"description": null,
"taxonomy": "tag",
"headData": {
"twImgId": null,
"twTitle": null,
"ogTitle": null,
"ogImgId": null,
"twDescription": null,
"description": null,
"title": "ChatGPT Archives | KQED News",
"ogDescription": null
},
"ttid": 32685,
"slug": "chatgpt",
"isLoading": false,
"link": "/news/tag/chatgpt"
},
"news_22434": {
"type": "terms",
"id": "news_22434",
"meta": {
"index": "terms_1716263798",
"site": "news",
"id": "22434",
"found": true
},
"relationships": {},
"featImg": null,
"name": "death",
"description": null,
"taxonomy": "tag",
"headData": {
"twImgId": null,
"twTitle": null,
"ogTitle": null,
"ogImgId": null,
"twDescription": null,
"description": null,
"title": "death Archives | KQED News",
"ogDescription": null
},
"ttid": 22451,
"slug": "death",
"isLoading": false,
"link": "/news/tag/death"
},
"news_23333": {
"type": "terms",
"id": "news_23333",
"meta": {
"index": "terms_1716263798",
"site": "news",
"id": "23333",
"found": true
},
"relationships": {},
"featImg": null,
"name": "families",
"description": null,
"taxonomy": "tag",
"headData": {
"twImgId": null,
"twTitle": null,
"ogTitle": null,
"ogImgId": null,
"twDescription": null,
"description": null,
"title": "families Archives | KQED News",
"ogDescription": null
},
"ttid": 23350,
"slug": "families",
"isLoading": false,
"link": "/news/tag/families"
},
"news_18543": {
"type": "terms",
"id": "news_18543",
"meta": {
"index": "terms_1716263798",
"site": "news",
"id": "18543",
"found": true
},
"relationships": {},
"featImg": null,
"name": "Health",
"description": null,
"taxonomy": "tag",
"headData": {
"twImgId": null,
"twTitle": null,
"ogTitle": null,
"ogImgId": null,
"twDescription": null,
"description": null,
"title": "Health Archives | KQED News",
"ogDescription": null
},
"ttid": 466,
"slug": "health",
"isLoading": false,
"link": "/news/tag/health"
},
"news_21891": {
"type": "terms",
"id": "news_21891",
"meta": {
"index": "terms_1716263798",
"site": "news",
"id": "21891",
"found": true
},
"relationships": {},
"featImg": null,
"name": "lawsuits",
"description": null,
"taxonomy": "tag",
"headData": {
"twImgId": null,
"twTitle": null,
"ogTitle": null,
"ogImgId": null,
"twDescription": null,
"description": null,
"title": "lawsuits Archives | KQED News",
"ogDescription": null
},
"ttid": 21908,
"slug": "lawsuits",
"isLoading": false,
"link": "/news/tag/lawsuits"
},
"news_2109": {
"type": "terms",
"id": "news_2109",
"meta": {
"index": "terms_1716263798",
"site": "news",
"id": "2109",
"found": true
},
"relationships": {},
"featImg": null,
"name": "mental health",
"description": null,
"taxonomy": "tag",
"headData": {
"twImgId": null,
"twTitle": null,
"ogTitle": null,
"ogImgId": null,
"twDescription": null,
"description": null,
"title": "mental health Archives | KQED News",
"ogDescription": null
},
"ttid": 2124,
"slug": "mental-health",
"isLoading": false,
"link": "/news/tag/mental-health"
},
"news_33543": {
"type": "terms",
"id": "news_33543",
"meta": {
"index": "terms_1716263798",
"site": "news",
"id": "33543",
"found": true
},
"relationships": {},
"name": "Sam Altman",
"slug": "sam-altman",
"taxonomy": "tag",
"description": null,
"featImg": null,
"headData": {
"title": "Sam Altman | KQED News",
"description": null,
"ogTitle": null,
"ogDescription": null,
"ogImgId": null,
"twTitle": null,
"twDescription": null,
"twImgId": null,
"metaRobotsNoIndex": "noindex"
},
"ttid": 33560,
"isLoading": false,
"link": "/news/tag/sam-altman"
},
"news_34586": {
"type": "terms",
"id": "news_34586",
"meta": {
"index": "terms_1716263798",
"site": "news",
"id": "34586",
"found": true
},
"relationships": {},
"name": "Silicon Valley",
"slug": "silicon-valley",
"taxonomy": "tag",
"description": null,
"featImg": null,
"headData": {
"title": "Silicon Valley | KQED News",
"description": null,
"ogTitle": null,
"ogDescription": null,
"ogImgId": null,
"twTitle": null,
"twDescription": null,
"twImgId": null
},
"ttid": 34603,
"isLoading": false,
"link": "/news/tag/silicon-valley"
},
"news_2883": {
"type": "terms",
"id": "news_2883",
"meta": {
"index": "terms_1716263798",
"site": "news",
"id": "2883",
"found": true
},
"relationships": {},
"featImg": null,
"name": "suicide",
"description": null,
"taxonomy": "tag",
"headData": {
"twImgId": null,
"twTitle": null,
"ogTitle": null,
"ogImgId": null,
"twDescription": null,
"description": null,
"title": "suicide Archives | KQED News",
"ogDescription": null
},
"ttid": 2901,
"slug": "suicide",
"isLoading": false,
"link": "/news/tag/suicide"
},
"news_1631": {
"type": "terms",
"id": "news_1631",
"meta": {
"index": "terms_1716263798",
"site": "news",
"id": "1631",
"found": true
},
"relationships": {},
"name": "Technology",
"slug": "technology",
"taxonomy": "tag",
"description": null,
"featImg": null,
"headData": {
"title": "Technology | KQED News",
"description": null,
"ogTitle": null,
"ogDescription": null,
"ogImgId": null,
"twTitle": null,
"twDescription": null,
"twImgId": null
},
"ttid": 1643,
"isLoading": false,
"link": "/news/tag/technology"
},
"news_21121": {
"type": "terms",
"id": "news_21121",
"meta": {
"index": "terms_1716263798",
"site": "news",
"id": "21121",
"found": true
},
"relationships": {},
"featImg": null,
"name": "Teenagers",
"description": null,
"taxonomy": "tag",
"headData": {
"twImgId": null,
"twTitle": null,
"ogTitle": null,
"ogImgId": null,
"twDescription": null,
"description": null,
"title": "Teenagers Archives | KQED News",
"ogDescription": null
},
"ttid": 21138,
"slug": "teenagers",
"isLoading": false,
"link": "/news/tag/teenagers"
},
"news_20385": {
"type": "terms",
"id": "news_20385",
"meta": {
"index": "terms_1716263798",
"site": "news",
"id": "20385",
"found": true
},
"relationships": {},
"featImg": null,
"name": "teens",
"description": null,
"taxonomy": "tag",
"headData": {
"twImgId": null,
"twTitle": null,
"ogTitle": null,
"ogImgId": null,
"twDescription": null,
"description": null,
"title": "teens Archives | KQED News",
"ogDescription": null
},
"ttid": 20402,
"slug": "teens",
"isLoading": false,
"link": "/news/tag/teens"
},
"news_33747": {
"type": "terms",
"id": "news_33747",
"meta": {
"index": "terms_1716263798",
"site": "news",
"id": "33747",
"found": true
},
"relationships": {},
"featImg": null,
"name": "Health",
"description": null,
"taxonomy": "interest",
"headData": {
"twImgId": null,
"twTitle": null,
"ogTitle": null,
"ogImgId": null,
"twDescription": null,
"description": null,
"title": "Health Archives | KQED News",
"ogDescription": null
},
"ttid": 33764,
"slug": "health",
"isLoading": false,
"link": "/news/interest/health"
},
"news_33733": {
"type": "terms",
"id": "news_33733",
"meta": {
"index": "terms_1716263798",
"site": "news",
"id": "33733",
"found": true
},
"relationships": {},
"featImg": null,
"name": "News",
"description": null,
"taxonomy": "interest",
"headData": {
"twImgId": null,
"twTitle": null,
"ogTitle": null,
"ogImgId": null,
"twDescription": null,
"description": null,
"title": "News Archives | KQED News",
"ogDescription": null
},
"ttid": 33750,
"slug": "news",
"isLoading": false,
"link": "/news/interest/news"
},
"news_33732": {
"type": "terms",
"id": "news_33732",
"meta": {
"index": "terms_1716263798",
"site": "news",
"id": "33732",
"found": true
},
"relationships": {},
"featImg": null,
"name": "Technology",
"description": null,
"taxonomy": "interest",
"headData": {
"twImgId": null,
"twTitle": null,
"ogTitle": null,
"ogImgId": null,
"twDescription": null,
"description": null,
"title": "Technology Archives | KQED News",
"ogDescription": null
},
"ttid": 33749,
"slug": "technology",
"isLoading": false,
"link": "/news/interest/technology"
},
"news_13": {
"type": "terms",
"id": "news_13",
"meta": {
"index": "terms_1716263798",
"site": "news",
"id": "13",
"found": true
},
"relationships": {},
"name": "Politics",
"slug": "politics",
"taxonomy": "category",
"description": null,
"featImg": null,
"headData": {
"title": "Politics | KQED News",
"description": null,
"ogTitle": null,
"ogDescription": null,
"ogImgId": null,
"twTitle": null,
"twDescription": null,
"twImgId": null
},
"ttid": 13,
"isLoading": false,
"link": "/news/category/politics"
},
"news_25184": {
"type": "terms",
"id": "news_25184",
"meta": {
"index": "terms_1716263798",
"site": "news",
"id": "25184",
"found": true
},
"relationships": {},
"featImg": null,
"name": "AI",
"description": null,
"taxonomy": "tag",
"headData": {
"twImgId": null,
"twTitle": null,
"ogTitle": null,
"ogImgId": null,
"twDescription": null,
"description": null,
"title": "AI Archives | KQED News",
"ogDescription": null
},
"ttid": 25201,
"slug": "ai",
"isLoading": false,
"link": "/news/tag/ai"
},
"news_32664": {
"type": "terms",
"id": "news_32664",
"meta": {
"index": "terms_1716263798",
"site": "news",
"id": "32664",
"found": true
},
"relationships": {},
"name": "AI software",
"slug": "ai-software",
"taxonomy": "tag",
"description": null,
"featImg": null,
"headData": {
"title": "AI software | KQED News",
"description": null,
"ogTitle": null,
"ogDescription": null,
"ogImgId": null,
"twTitle": null,
"twDescription": null,
"twImgId": null
},
"ttid": 32681,
"isLoading": false,
"link": "/news/tag/ai-software"
},
"news_34755": {
"type": "terms",
"id": "news_34755",
"meta": {
"index": "terms_1716263798",
"site": "news",
"id": "34755",
"found": true
},
"relationships": {},
"name": "artificial intelligence",
"slug": "artificial-intelligence",
"taxonomy": "tag",
"description": null,
"featImg": null,
"headData": {
"title": "artificial intelligence | KQED News",
"description": null,
"ogTitle": null,
"ogDescription": null,
"ogImgId": null,
"twTitle": null,
"twDescription": null,
"twImgId": null
},
"ttid": 34772,
"isLoading": false,
"link": "/news/tag/artificial-intelligence"
},
"news_29886": {
"type": "terms",
"id": "news_29886",
"meta": {
"index": "terms_1716263798",
"site": "news",
"id": "29886",
"found": true
},
"relationships": {},
"featImg": null,
"name": "children's health",
"description": null,
"taxonomy": "tag",
"headData": {
"twImgId": null,
"twTitle": null,
"ogTitle": null,
"ogImgId": null,
"twDescription": null,
"description": null,
"title": "children's health Archives | KQED News",
"ogDescription": null
},
"ttid": 29903,
"slug": "childrens-health",
"isLoading": false,
"link": "/news/tag/childrens-health"
},
"news_22456": {
"type": "terms",
"id": "news_22456",
"meta": {
"index": "terms_1716263798",
"site": "news",
"id": "22456",
"found": true
},
"relationships": {},
"featImg": null,
"name": "public safety",
"description": null,
"taxonomy": "tag",
"headData": {
"twImgId": null,
"twTitle": null,
"ogTitle": null,
"ogImgId": null,
"twDescription": null,
"description": null,
"title": "public safety Archives | KQED News",
"ogDescription": null
},
"ttid": 22473,
"slug": "public-safety",
"isLoading": false,
"link": "/news/tag/public-safety"
},
"news_38": {
"type": "terms",
"id": "news_38",
"meta": {
"index": "terms_1716263798",
"site": "news",
"id": "38",
"found": true
},
"relationships": {},
"featImg": null,
"name": "San Francisco",
"description": null,
"taxonomy": "tag",
"headData": {
"twImgId": null,
"twTitle": null,
"ogTitle": null,
"ogImgId": null,
"twDescription": null,
"description": null,
"title": "San Francisco Archives | KQED News",
"ogDescription": null
},
"ttid": 58,
"slug": "san-francisco",
"isLoading": false,
"link": "/news/tag/san-francisco"
},
"news_33729": {
"type": "terms",
"id": "news_33729",
"meta": {
"index": "terms_1716263798",
"site": "news",
"id": "33729",
"found": true
},
"relationships": {},
"featImg": null,
"name": "San Francisco",
"description": null,
"taxonomy": "interest",
"headData": {
"twImgId": null,
"twTitle": null,
"ogTitle": null,
"ogImgId": null,
"twDescription": null,
"description": null,
"title": "San Francisco Archives | KQED News",
"ogDescription": null
},
"ttid": 33746,
"slug": "san-francisco",
"isLoading": false,
"link": "/news/interest/san-francisco"
},
"news_27626": {
"type": "terms",
"id": "news_27626",
"meta": {
"index": "terms_1716263798",
"site": "news",
"id": "27626",
"found": true
},
"relationships": {},
"featImg": null,
"name": "featured-news",
"description": null,
"taxonomy": "tag",
"headData": {
"twImgId": null,
"twTitle": null,
"ogTitle": null,
"ogImgId": null,
"twDescription": null,
"description": null,
"title": "featured-news Archives | KQED News",
"ogDescription": null
},
"ttid": 27643,
"slug": "featured-news",
"isLoading": false,
"link": "/news/tag/featured-news"
},
"news_689": {
"type": "terms",
"id": "news_689",
"meta": {
"index": "terms_1716263798",
"site": "news",
"id": "689",
"found": true
},
"relationships": {},
"featImg": null,
"name": "Parenting",
"description": null,
"taxonomy": "tag",
"headData": {
"twImgId": null,
"twTitle": null,
"ogTitle": null,
"ogImgId": null,
"twDescription": null,
"description": null,
"title": "Parenting Archives | KQED News",
"ogDescription": null
},
"ttid": 698,
"slug": "parenting",
"isLoading": false,
"link": "/news/tag/parenting"
},
"news_33738": {
"type": "terms",
"id": "news_33738",
"meta": {
"index": "terms_1716263798",
"site": "news",
"id": "33738",
"found": true
},
"relationships": {},
"featImg": null,
"name": "California",
"description": null,
"taxonomy": "interest",
"headData": {
"twImgId": null,
"twTitle": null,
"ogTitle": null,
"ogImgId": null,
"twDescription": null,
"description": null,
"title": "California Archives | KQED News",
"ogDescription": null
},
"ttid": 33755,
"slug": "california",
"isLoading": false,
"link": "/news/interest/california"
},
"news_457": {
"type": "terms",
"id": "news_457",
"meta": {
"index": "terms_1716263798",
"site": "news",
"id": "457",
"found": true
},
"relationships": {},
"featImg": null,
"name": "Health",
"description": null,
"taxonomy": "category",
"headData": {
"twImgId": null,
"twTitle": null,
"ogTitle": null,
"ogImgId": null,
"twDescription": null,
"description": null,
"title": "Health Archives | KQED News",
"ogDescription": null
},
"ttid": 16998,
"slug": "health",
"isLoading": false,
"link": "/news/category/health"
},
"news_32707": {
"type": "terms",
"id": "news_32707",
"meta": {
"index": "terms_1716263798",
"site": "news",
"id": "32707",
"found": true
},
"relationships": {},
"featImg": null,
"name": "audience-news",
"description": null,
"taxonomy": "tag",
"headData": {
"twImgId": null,
"twTitle": null,
"ogTitle": null,
"ogImgId": null,
"twDescription": null,
"description": null,
"title": "audience-news Archives | KQED News",
"ogDescription": null
},
"ttid": 32724,
"slug": "audience-news",
"isLoading": false,
"link": "/news/tag/audience-news"
},
"news_178": {
"type": "terms",
"id": "news_178",
"meta": {
"index": "terms_1716263798",
"site": "news",
"id": "178",
"found": true
},
"relationships": {},
"featImg": null,
"name": "Stanford",
"description": null,
"taxonomy": "tag",
"headData": {
"twImgId": null,
"twTitle": null,
"ogTitle": null,
"ogImgId": null,
"twDescription": null,
"description": null,
"title": "Stanford Archives | KQED News",
"ogDescription": null
},
"ttid": 185,
"slug": "stanford",
"isLoading": false,
"link": "/news/tag/stanford"
},
"news_1928": {
"type": "terms",
"id": "news_1928",
"meta": {
"index": "terms_1716263798",
"site": "news",
"id": "1928",
"found": true
},
"relationships": {},
"featImg": null,
"name": "Stanford University",
"description": null,
"taxonomy": "tag",
"headData": {
"twImgId": null,
"twTitle": null,
"ogTitle": null,
"ogImgId": null,
"twDescription": null,
"description": null,
"title": "Stanford University Archives | KQED News",
"ogDescription": null
},
"ttid": 1943,
"slug": "stanford-university",
"isLoading": false,
"link": "/news/tag/stanford-university"
},
"news_28250": {
"type": "terms",
"id": "news_28250",
"meta": {
"index": "terms_1716263798",
"site": "news",
"id": "28250",
"found": true
},
"relationships": {},
"featImg": null,
"name": "Local",
"description": null,
"taxonomy": "category",
"headData": {
"twImgId": null,
"twTitle": null,
"ogTitle": null,
"ogImgId": null,
"twDescription": null,
"description": null,
"title": "Local Archives | KQED News",
"ogDescription": null
},
"ttid": 28267,
"slug": "local",
"isLoading": false,
"link": "/news/category/local"
},
"news_19960": {
"type": "terms",
"id": "news_19960",
"meta": {
"index": "terms_1716263798",
"site": "news",
"id": "19960",
"found": true
},
"relationships": {},
"featImg": null,
"name": "public health",
"description": null,
"taxonomy": "tag",
"headData": {
"twImgId": null,
"twTitle": null,
"ogTitle": null,
"ogImgId": null,
"twDescription": null,
"description": null,
"title": "public health Archives | KQED News",
"ogDescription": null
},
"ttid": 19977,
"slug": "public-health",
"isLoading": false,
"link": "/news/tag/public-health"
},
"news_33731": {
"type": "terms",
"id": "news_33731",
"meta": {
"index": "terms_1716263798",
"site": "news",
"id": "33731",
"found": true
},
"relationships": {},
"featImg": null,
"name": "South Bay",
"description": null,
"taxonomy": "interest",
"headData": {
"twImgId": null,
"twTitle": null,
"ogTitle": null,
"ogImgId": null,
"twDescription": null,
"description": null,
"title": "South Bay Archives | KQED News",
"ogDescription": null
},
"ttid": 33748,
"slug": "south-bay",
"isLoading": false,
"link": "/news/interest/south-bay"
},
"news_34377": {
"type": "terms",
"id": "news_34377",
"meta": {
"index": "terms_1716263798",
"site": "news",
"id": "34377",
"found": true
},
"relationships": {},
"name": "featured-politics",
"slug": "featured-politics",
"taxonomy": "tag",
"description": null,
"featImg": null,
"headData": {
"title": "featured-politics Archives | KQED News",
"description": null,
"ogTitle": null,
"ogDescription": null,
"ogImgId": null,
"twTitle": null,
"twDescription": null,
"twImgId": null
},
"ttid": 34394,
"isLoading": false,
"link": "/news/tag/featured-politics"
},
"news_16": {
"type": "terms",
"id": "news_16",
"meta": {
"index": "terms_1716263798",
"site": "news",
"id": "16",
"found": true
},
"relationships": {},
"featImg": null,
"name": "Gavin Newsom",
"description": null,
"taxonomy": "tag",
"headData": {
"twImgId": null,
"twTitle": null,
"ogTitle": null,
"ogImgId": null,
"twDescription": null,
"description": null,
"title": "Gavin Newsom Archives | KQED News",
"ogDescription": null
},
"ttid": 16,
"slug": "gavin-newsom",
"isLoading": false,
"link": "/news/tag/gavin-newsom"
},
"news_17968": {
"type": "terms",
"id": "news_17968",
"meta": {
"index": "terms_1716263798",
"site": "news",
"id": "17968",
"found": true
},
"relationships": {},
"name": "Politics",
"slug": "politics",
"taxonomy": "tag",
"description": null,
"featImg": null,
"headData": {
"title": "Politics | KQED News",
"description": null,
"ogTitle": null,
"ogDescription": null,
"ogImgId": null,
"twTitle": null,
"twDescription": null,
"twImgId": null
},
"ttid": 18002,
"isLoading": false,
"link": "/news/tag/politics"
},
"news_33734": {
"type": "terms",
"id": "news_33734",
"meta": {
"index": "terms_1716263798",
"site": "news",
"id": "33734",
"found": true
},
"relationships": {},
"featImg": null,
"name": "Local Politics",
"description": null,
"taxonomy": "interest",
"headData": {
"twImgId": null,
"twTitle": null,
"ogTitle": null,
"ogImgId": null,
"twDescription": null,
"description": null,
"title": "Local Politics Archives | KQED News",
"ogDescription": null
},
"ttid": 33751,
"slug": "local-politics",
"isLoading": false,
"link": "/news/interest/local-politics"
},
"news_1386": {
"type": "terms",
"id": "news_1386",
"meta": {
"index": "terms_1716263798",
"site": "news",
"id": "1386",
"found": true
},
"relationships": {},
"featImg": null,
"name": "Bay Area",
"description": null,
"taxonomy": "tag",
"headData": {
"twImgId": null,
"twTitle": null,
"ogTitle": null,
"ogImgId": null,
"twDescription": null,
"description": null,
"title": "Bay Area Archives | KQED News",
"ogDescription": null
},
"ttid": 1398,
"slug": "bay-area",
"isLoading": false,
"link": "/news/tag/bay-area"
},
"news_3424": {
"type": "terms",
"id": "news_3424",
"meta": {
"index": "terms_1716263798",
"site": "news",
"id": "3424",
"found": true
},
"relationships": {},
"featImg": null,
"name": "nonprofits",
"description": null,
"taxonomy": "tag",
"headData": {
"twImgId": null,
"twTitle": null,
"ogTitle": null,
"ogImgId": null,
"twDescription": null,
"description": null,
"title": "nonprofits Archives | KQED News",
"ogDescription": null
},
"ttid": 3442,
"slug": "nonprofits",
"isLoading": false,
"link": "/news/tag/nonprofits"
},
"news_21285": {
"type": "terms",
"id": "news_21285",
"meta": {
"index": "terms_1716263798",
"site": "news",
"id": "21285",
"found": true
},
"relationships": {},
"featImg": null,
"name": "South Bay",
"description": null,
"taxonomy": "tag",
"headData": {
"twImgId": null,
"twTitle": null,
"ogTitle": null,
"ogImgId": null,
"twDescription": null,
"description": null,
"title": "South Bay Archives | KQED News",
"ogDescription": null
},
"ttid": 21302,
"slug": "south-bay",
"isLoading": false,
"link": "/news/tag/south-bay"
},
"news_19112": {
"type": "terms",
"id": "news_19112",
"meta": {
"index": "terms_1716263798",
"site": "news",
"id": "19112",
"found": true
},
"relationships": {},
"featImg": null,
"name": "Alex Padilla",
"description": null,
"taxonomy": "tag",
"headData": {
"twImgId": null,
"twTitle": null,
"ogTitle": null,
"ogImgId": null,
"twDescription": null,
"description": null,
"title": "Alex Padilla Archives | KQED News",
"ogDescription": null
},
"ttid": 19129,
"slug": "alex-padilla",
"isLoading": false,
"link": "/news/tag/alex-padilla"
}
},
"userAgentReducer": {
"userAgent": "Mozilla/5.0 AppleWebKit/537.36 (KHTML, like Gecko; compatible; ClaudeBot/1.0; +claudebot@anthropic.com)",
"isBot": true
},
"userPermissionsReducer": {
"wpLoggedIn": false
},
"localStorageReducer": {},
"browserHistoryReducer": [],
"eventsReducer": {},
"fssReducer": {},
"tvDailyScheduleReducer": {},
"tvWeeklyScheduleReducer": {},
"tvPrimetimeScheduleReducer": {},
"tvMonthlyScheduleReducer": {},
"userAccountReducer": {
"user": {
"email": null,
"emailStatus": "EMAIL_UNVALIDATED",
"loggedStatus": "LOGGED_OUT",
"loggingChecked": false,
"articles": [],
"firstName": null,
"lastName": null,
"phoneNumber": null,
"fetchingMembership": false,
"membershipError": false,
"memberships": [
{
"id": null,
"startDate": null,
"firstName": null,
"lastName": null,
"familyNumber": null,
"memberNumber": null,
"memberSince": null,
"expirationDate": null,
"pfsEligible": false,
"isSustaining": false,
"membershipLevel": "Prospect",
"membershipStatus": "Non Member",
"lastGiftDate": null,
"renewalDate": null
}
]
},
"authModal": {
"isOpen": false,
"view": "LANDING_VIEW"
},
"error": null
},
"youthMediaReducer": {},
"checkPleaseReducer": {
"filterData": {},
"restaurantData": []
},
"location": {
"pathname": "/news/tag/openai",
"previousPathname": "/"
}
}