Newsom Signs California AI Transparency Bill Tailored to Meet Tech Industry Tastes
NASA Mission, With Help From UC Berkeley, To Explore Earth's Exosphere
Child Safety Groups Demand Mental Health Guardrails After California Teen’s Suicide Using ChatGPT
Newsom’s Tightrope Walk Between AI Regulation and Silicon Valley Cash
"authorsReducer": {
"rachael-myrow": {
"type": "authors",
"id": "251",
"meta": {
"index": "authors_1716337520",
"id": "251",
"found": true
},
"name": "Rachael Myrow",
"firstName": "Rachael",
"lastName": "Myrow",
"slug": "rachael-myrow",
"email": "rmyrow@kqed.org",
"display_author_email": false,
"staff_mastheads": [
"news"
],
"title": "Senior Editor of KQED's Silicon Valley News Desk",
"bio": "Rachael Myrow is Senior Editor of KQED's Silicon Valley News Desk, reporting on topics like \u003ca href=\"https://www.kqed.org/news/12023367/what-big-tech-sees-in-donald-trump\">what Big Tech sees in President Trump\u003c/a>, \u003ca href=\"https://www.kqed.org/news/12020857/california-lawmaker-ready-revive-fight-regulating-ai\">California's many, many AI bills\u003c/a>, and the \u003ca href=\"https://www.kqed.org/news/12017713/lost-sounds-of-san-francisco\">lost sounds of San Francisco\u003c/a>. You can hear her work on \u003ca href=\"https://www.npr.org/search?query=Rachael%20Myrow&page=1\">NPR\u003c/a>, \u003ca href=\"https://theworld.org/people/rachael-myrow\">The World\u003c/a>, WBUR's \u003ca href=\"https://www.wbur.org/search?q=Rachael%20Myrow\">\u003ci>Here & Now\u003c/i>\u003c/a> and the BBC. \u003c/i>She also guest hosts for KQED's \u003ci>\u003ca href=\"https://www.kqed.org/forum/tag/rachael-myrow\">Forum\u003c/a>\u003c/i>. Over the years, she's talked with Kamau Bell, David Byrne, Kamala Harris, Tony Kushner, Armistead Maupin, Van Dyke Parks, Arnold Schwarzenegger and Tommie Smith, among others.\r\n\r\nBefore all this, she hosted \u003cem>The California Report\u003c/em> for 7+ years.\r\n\r\nAwards? Sure: Peabody, Edward R. Murrow, Regional Edward R. Murrow, RTNDA, Northern California RTNDA, SPJ Northern California Chapter, LA Press Club, Golden Mic. Prior to joining KQED, Rachael worked in Los Angeles at KPCC and Marketplace. She holds degrees in English and journalism from UC Berkeley (where she got her start in public radio on KALX-FM).\r\n\r\nOutside of the studio, you'll find Rachael hiking Bay Area trails and whipping up Instagram-ready meals in her kitchen. More recently, she's taken up native-forward gardening.",
"avatar": "https://secure.gravatar.com/avatar/87bf8cb5874e045cdff430523a6d48b1?s=600&d=blank&r=g",
"twitter": "rachaelmyrow",
"facebook": null,
"instagram": null,
"linkedin": "https://www.linkedin.com/in/rachaelmyrow/",
"sites": [
{
"site": "arts",
"roles": [
"administrator"
]
},
{
"site": "news",
"roles": [
"edit_others_posts",
"editor"
]
},
{
"site": "futureofyou",
"roles": [
"editor"
]
},
{
"site": "bayareabites",
"roles": [
"editor"
]
},
{
"site": "stateofhealth",
"roles": [
"editor"
]
},
{
"site": "science",
"roles": [
"editor"
]
},
{
"site": "food",
"roles": [
"editor"
]
},
{
"site": "forum",
"roles": [
"editor"
]
},
{
"site": "liveblog",
"roles": [
"author"
]
}
],
"headData": {
"title": "Rachael Myrow | KQED",
"description": "Senior Editor of KQED's Silicon Valley News Desk",
"ogImgSrc": "https://secure.gravatar.com/avatar/87bf8cb5874e045cdff430523a6d48b1?s=600&d=blank&r=g",
"twImgSrc": "https://secure.gravatar.com/avatar/87bf8cb5874e045cdff430523a6d48b1?s=600&d=blank&r=g"
},
"isLoading": false,
"link": "/author/rachael-myrow"
},
"kmizuguchi": {
"type": "authors",
"id": "11739",
"meta": {
"index": "authors_1716337520",
"id": "11739",
"found": true
},
"name": "Keith Mizuguchi",
"firstName": "Keith",
"lastName": "Mizuguchi",
"slug": "kmizuguchi",
"email": "kmizuguchi@kqed.org",
"display_author_email": false,
"staff_mastheads": [],
"title": "KQED Contributor",
"bio": null,
"avatar": "https://secure.gravatar.com/avatar/ce1182f9924192ae5ea66d39a75cd7d1?s=600&d=blank&r=g",
"twitter": null,
"facebook": null,
"instagram": null,
"linkedin": null,
"sites": [
{
"site": "news",
"roles": [
"editor"
]
},
{
"site": "science",
"roles": [
"editor"
]
}
],
"headData": {
"title": "Keith Mizuguchi | KQED",
"description": "KQED Contributor",
"ogImgSrc": "https://secure.gravatar.com/avatar/ce1182f9924192ae5ea66d39a75cd7d1?s=600&d=blank&r=g",
"twImgSrc": "https://secure.gravatar.com/avatar/ce1182f9924192ae5ea66d39a75cd7d1?s=600&d=blank&r=g"
},
"isLoading": false,
"link": "/author/kmizuguchi"
},
"bkrans": {
"type": "authors",
"id": "11923",
"meta": {
"index": "authors_1716337520",
"id": "11923",
"found": true
},
"name": "Brian Krans",
"firstName": "Brian",
"lastName": "Krans",
"slug": "bkrans",
"email": "bkrans@kqed.org",
"display_author_email": false,
"staff_mastheads": [
"news"
],
"title": "KQED Contributing Reporter",
"bio": "Brian Krans is an award-winning local news and investigative reporter who has been proudly working as a general assignment reporter for KQED since August 2023. He lives in Richmond, where he also reports on air pollution for Richmondside. He is also a founding member of the Vallejo Sun.",
"avatar": "https://secure.gravatar.com/avatar/d1014d604089314a94807d2c4f2d3e06?s=600&d=blank&r=g",
"twitter": "citizenkrans",
"facebook": null,
"instagram": null,
"linkedin": null,
"sites": [
{
"site": "news",
"roles": [
"editor"
]
}
],
"headData": {
"title": "Brian Krans | KQED",
"description": "KQED Contributing Reporter",
"ogImgSrc": "https://secure.gravatar.com/avatar/d1014d604089314a94807d2c4f2d3e06?s=600&d=blank&r=g",
"twImgSrc": "https://secure.gravatar.com/avatar/d1014d604089314a94807d2c4f2d3e06?s=600&d=blank&r=g"
},
"isLoading": false,
"link": "/author/bkrans"
},
"emanoukian": {
"type": "authors",
"id": "11925",
"meta": {
"index": "authors_1716337520",
"id": "11925",
"found": true
},
"name": "Elize Manoukian",
"firstName": "Elize",
"lastName": "Manoukian",
"slug": "emanoukian",
"email": "emanoukian@KQED.org",
"display_author_email": false,
"staff_mastheads": [
"news"
],
"title": "KQED Contributor",
"bio": null,
"avatar": "https://secure.gravatar.com/avatar/3ae2b7f374920c4c6bdbb4c21d5d065f?s=600&d=blank&r=g",
"twitter": null,
"facebook": null,
"instagram": null,
"linkedin": null,
"sites": [
{
"site": "arts",
"roles": [
"editor"
]
},
{
"site": "news",
"roles": [
"editor"
]
},
{
"site": "science",
"roles": [
"editor"
]
},
{
"site": "liveblog",
"roles": [
"author"
]
}
],
"headData": {
"title": "Elize Manoukian | KQED",
"description": "KQED Contributor",
"ogImgSrc": "https://secure.gravatar.com/avatar/3ae2b7f374920c4c6bdbb4c21d5d065f?s=600&d=blank&r=g",
"twImgSrc": "https://secure.gravatar.com/avatar/3ae2b7f374920c4c6bdbb4c21d5d065f?s=600&d=blank&r=g"
},
"isLoading": false,
"link": "/author/emanoukian"
}
},
"breakingNewsReducer": {},
"pagesReducer": {},
"postsReducer": {
"stream_live": {
"type": "live",
"id": "stream_live",
"audioUrl": "https://streams.kqed.org/kqedradio",
"title": "Live Stream",
"excerpt": "Live Stream information currently unavailable.",
"link": "/radio",
"featImg": "",
"label": {
"name": "KQED Live",
"link": "/"
}
},
"stream_kqedNewscast": {
"type": "posts",
"id": "stream_kqedNewscast",
"audioUrl": "https://www.kqed.org/.stream/anon/radio/RDnews/newscast.mp3?_=1",
"title": "KQED Newscast",
"featImg": "",
"label": {
"name": "88.5 FM",
"link": "/"
}
},
"news_12066910": {
"type": "posts",
"id": "news_12066910",
"meta": {
"index": "posts_1716263798",
"site": "news",
"id": "12066910",
"score": null,
"sort": [
1765573663000
]
},
"guestAuthors": [],
"slug": "trumps-ai-order-provokes-pushback-from-california-officials-and-consumer-advocates",
"title": "Trump’s AI Order Provokes Pushback from California Officials and Consumer Advocates",
"publishDate": 1765573663,
"format": "audio",
"headTitle": "Trump’s AI Order Provokes Pushback from California Officials and Consumer Advocates | KQED",
"labelTerm": {
"site": "news"
},
"content": "\u003cp>In the last decade, California has passed 42 laws to regulate artificial intelligence, more than any other state, according to \u003ca href=\"https://hai.stanford.edu/ai-index/2025-ai-index-report\">Stanford’s Institute for Human-Centered AI\u003c/a>. So it comes as no surprise that state leaders reacted with ire to President Donald Trump’s \u003ca href=\"https://www.whitehouse.gov/presidential-actions/2025/12/eliminating-state-law-obstruction-of-national-artificial-intelligence-policy/\">executive order\u003c/a> slapping down state efforts to regulate AI.\u003c/p>\n\u003cp>The clash highlights the growing friction between California’s push for consumer protections and the tech industry’s efforts to neutralize regulation. The executive order follows \u003ca href=\"https://www.wsj.com/tech/ai/the-silicon-valley-campaign-to-win-trump-over-on-ai-regulation-214bd6bd\">previous failures\u003c/a> led by Silicon Valley venture capitalist David Sacks, now the president’s AI and crypto advisor, to pass a moratorium on state AI regulation through Congress.\u003c/p>\n\u003cp>“President Trump and Davis Sacks aren’t making policy — they’re running a con. And every day, they push the limits to see how far they can take it,” Gov. Gavin Newsom wrote in a statement on Thursday.\u003c/p>\n\u003cp>[ad fullwidth]\u003c/p>\n\u003cp>As an earlier draft of the order circulated in Washington, critics warned it would neuter state laws designed to protect children and adults from the more predatory forms of commercial AI.\u003c/p>\n\u003cp>Trump’s executive order echoed talking points articulated by Silicon Valley leaders, including calls for a uniform federal regulatory framework, and concerns that state regulations could slow the pace of AI innovation.\u003c/p>\n\u003cp>“POTUS stepping in creates space for builders to focus on innovation while Congress finishes the job,”\u003ca href=\"https://x.com/Collin_McCune/status/1999264399459066212?s=20\"> wrote\u003c/a> Collin McCune, who leads government affairs for the Menlo Park-based venture capital firm Andreessen Horowitz, which is among the companies that have spent tens of millions of dollars to block or weaken Congressional action. “Now lawmakers have to act. Our standing in the global AI race—and the direct benefits Americans will see from it—depend on it,” he added.\u003c/p>\n\u003cp>https://twitter.com/RapidResponse47/status/1999257391356125348\u003c/p>\n\u003cp>The industry push to get the White House to supersede state legislation is “shortsighted,” said State Sen. Josh Becker, D-Menlo Park, who has worked to pass several of California’s state bills governing AI. “I think they’re going to pay the price in the long run.”\u003c/p>\n\u003cp>But just how big of an effect the order will have on California’s AI regulations is unclear. 
It includes exemptions for laws that cover child safety, data center infrastructure, state government use of AI and “other topics as shall be determined.”\u003c/p>\n\u003cp>“This is going to sow massive confusion in the industry,” Becker said.\u003c/p>\n\u003cp>Becker is wondering about the future of several AI bills he co-authored, including one regulating AI companion chatbots, due to go into effect in January, which Newsom \u003ca href=\"https://www.gov.ca.gov/2025/10/13/governor-newsom-signs-bills-to-further-strengthen-californias-leadership-in-protecting-children-online/\">signed\u003c/a> into law as part of a \u003ca href=\"https://www.kqed.org/news/12059714/newsom-vetoes-most-watched-childrens-ai-bill-signs-16-others-targeting-tech\">broader package\u003c/a> of online safety and emerging-tech protections.\u003c/p>\n\u003cp>“Is that affected by this? Because there’s a big part of it that deals with kids and chatbots, but there were parts of the bill that dealt with other things,” Becker said.[aside postID=forum_2010101912169 hero='https://cdn.kqed.org/wp-content/uploads/sites/43/2025/11/GettyImages-2203864303-2000x1333.jpg']The executive order is widely expected to prompt legal challenges because only Congress has the authority to override state laws. Speaking in Sacramento on Friday, California Attorney General Rob Bonta said it was too early to determine any legal action.\u003c/p>\n\u003cp>“Where it’s headed, and what it intends to do, raises great concerns and flags. But we don’t sue until there’s action that we can take. Sometimes that’s upon the issuance of the executive order. Sometimes it’s later,” said Bonta, whose office has sued the Trump administration 49 times this year.\u003c/p>\n\u003cp>Bonta’s measured stance contrasts with state lawmakers who see imminent danger in this latest move from the White House.\u003c/p>\n\u003cp>“President Trump’s executive order is a dangerous attack on states’ constitutional authority to protect our residents from urgent AI harms,” wrote Assemblymember Rebecca Bauer-Kahan, D-Orinda, who has authored multiple AI bills regulating everything from algorithmic discrimination and transparency to protections for children and Hollywood creatives.\u003c/p>\n\u003cp>“While the tech industry lobbies for deregulation, women are being victimized by AI-powered nudification apps, artists and creators are having their livelihoods cannibalized without notice, deepfakes are being weaponized for harassment and fraud, and AI systems are perpetuating discrimination in housing, employment, and lending. These aren’t theoretical risks— they’re happening now and demand action,” Bauer-Kahan wrote.\u003c/p>\n\u003cp>California is not alone in its efforts to regulate AI at the state level. This year, all 50 states and territories introduced AI legislation and 38 states adopted about 100 laws, according to the \u003ca href=\"https://www.ncsl.org/state-legislatures-news/details/as-ai-tools-become-commonplace-so-do-concerns\">National Conference of State Legislatures\u003c/a>.\u003c/p>\n\u003cp>“This executive order is an outrageous betrayal of the states that, as Congress has stalled, have worked tirelessly to protect their residents from the very real risks of AI,” wrote James Steyer, head of Common Sense Media. The advocacy group has sponsored state bills in California and elsewhere. 
“Stripping states of their constitutional rights to protect their residents from unsafe AI — while holding critical broadband funding hostage, no less — erases the progress they are making and puts lives in danger,” Steyer wrote.\u003c/p>\n\u003cp>\u003c/p>\n",
"blocks": [],
"excerpt": "President Trump’s executive order directing federal agencies to challenge, preempt or otherwise neutralize state AI rules is widely seen as a win for Silicon Valley companies that lobbied against regulation, but blowback is expected imminently.\r\n",
"status": "publish",
"parent": 0,
"modified": 1765654804,
"stats": {
"hasAudio": false,
"hasVideo": false,
"hasChartOrMap": false,
"iframeSrcs": [],
"hasGoogleForm": false,
"hasGallery": false,
"hasHearkenModule": false,
"hasPolis": false,
"paragraphCount": 19,
"wordCount": 890
},
"headData": {
"title": "Trump’s AI Order Provokes Pushback from California Officials and Consumer Advocates | KQED",
"description": "President Trump’s executive order directing federal agencies to challenge, preempt or otherwise neutralize state AI rules is widely seen as a win for Silicon Valley companies that lobbied against regulation, but blowback is expected imminently.\r\n",
"ogTitle": "",
"ogDescription": "",
"ogImgId": "",
"twTitle": "",
"twDescription": "",
"twImgId": "",
"schema": {
"@context": "https://schema.org",
"@type": "NewsArticle",
"headline": "Trump’s AI Order Provokes Pushback from California Officials and Consumer Advocates",
"datePublished": "2025-12-12T13:07:43-08:00",
"dateModified": "2025-12-13T11:40:04-08:00",
"image": "https://cdn.kqed.org/wp-content/uploads/2020/02/KQED-OG-Image@1x.png",
"isAccessibleForFree": "True",
"publisher": {
"@type": "NewsMediaOrganization",
"@id": "https://www.kqed.org/#organization",
"name": "KQED",
"logo": "https://cdn.kqed.org/wp-content/uploads/2020/02/KQED-OG-Image@1x.png",
"url": "https://www.kqed.org",
"sameAs": [
"https://www.facebook.com/KQED",
"https://twitter.com/KQED",
"https://www.instagram.com/kqed/",
"https://www.tiktok.com/@kqedofficial",
"https://www.linkedin.com/company/kqed",
"https://www.youtube.com/channel/UCeC0IOo7i1P_61zVUWbJ4nw"
]
}
}
},
"primaryCategory": {
"termId": 248,
"slug": "technology",
"name": "Technology"
},
"audioUrl": "https://traffic.omny.fm/d/clips/0af137ef-751e-4b19-a055-aaef00d2d578/ffca7e9f-6831-4[…]f-aaef00f5a073/6afb0475-7a02-409c-abe7-b3b200048172/audio.mp3",
"sticky": false,
"nprStoryId": "kqed-12066910",
"templateType": "standard",
"featuredImageType": "standard",
"excludeFromSiteSearch": "Include",
"articleAge": "0",
"path": "/news/12066910/trumps-ai-order-provokes-pushback-from-california-officials-and-consumer-advocates",
"audioTrackLength": null,
"parsedContent": [
{
"type": "contentString",
"content": "\u003cdiv class=\"post-body\">\u003cp>\u003cp>In the last decade, California has passed 42 laws to regulate artificial intelligence, more than any other state, according to \u003ca href=\"https://hai.stanford.edu/ai-index/2025-ai-index-report\">Stanford’s Institute for Human-Centered AI\u003c/a>. So it comes as no surprise that state leaders reacted with ire to President Donald Trump’s \u003ca href=\"https://www.whitehouse.gov/presidential-actions/2025/12/eliminating-state-law-obstruction-of-national-artificial-intelligence-policy/\">executive order\u003c/a> slapping down state efforts to regulate AI.\u003c/p>\n\u003cp>The clash highlights the growing friction between California’s push for consumer protections and the tech industry’s efforts to neutralize regulation. The executive order follows \u003ca href=\"https://www.wsj.com/tech/ai/the-silicon-valley-campaign-to-win-trump-over-on-ai-regulation-214bd6bd\">previous failures\u003c/a> led by Silicon Valley venture capitalist David Sacks, now the president’s AI and crypto advisor, to pass a moratorium on state AI regulation through Congress.\u003c/p>\n\u003cp>“President Trump and Davis Sacks aren’t making policy — they’re running a con. And every day, they push the limits to see how far they can take it,” Gov. Gavin Newsom wrote in a statement on Thursday.\u003c/p>\n\u003cp>\u003c/p>\u003c/div>",
"attributes": {
"named": {},
"numeric": []
}
},
{
"type": "component",
"content": "",
"name": "ad",
"attributes": {
"named": {
"label": "fullwidth"
},
"numeric": [
"fullwidth"
]
}
},
{
"type": "contentString",
"content": "\u003cdiv class=\"post-body\">\u003cp>\u003c/p>\n\u003cp>As an earlier draft of the order circulated in Washington, critics warned it would neuter state laws designed to protect children and adults from the more predatory forms of commercial AI.\u003c/p>\n\u003cp>Trump’s executive order echoed talking points articulated by Silicon Valley leaders, including calls for a uniform federal regulatory framework, and concerns that state regulations could slow the pace of AI innovation.\u003c/p>\n\u003cp>“POTUS stepping in creates space for builders to focus on innovation while Congress finishes the job,”\u003ca href=\"https://x.com/Collin_McCune/status/1999264399459066212?s=20\"> wrote\u003c/a> Collin McCune, who leads government affairs for the Menlo Park-based venture capital firm Andreessen Horowitz, which is among the companies that have spent tens of millions of dollars to block or weaken Congressional action. “Now lawmakers have to act. Our standing in the global AI race—and the direct benefits Americans will see from it—depend on it,” he added.\u003c/p>\u003c/p>\u003c/div>",
"attributes": {
"named": {},
"numeric": []
}
},
{
"type": "component",
"content": "",
"name": "singleTwitterStatus",
"attributes": {
"named": {
"id": "1999257391356125348"
},
"numeric": []
}
},
{
"type": "contentString",
"content": "\u003cdiv class=\"post-body\">\u003cp>\n\u003cp>The industry push to get the White House to supersede state legislation is “shortsighted,” said State Sen. Josh Becker, D-Menlo Park, who has worked to pass several of California’s state bills governing AI. “I think they’re going to pay the price in the long run.”\u003c/p>\n\u003cp>But just how big of an effect the order will have on California’s AI regulations is unclear. It includes exemptions for laws that cover child safety, data center infrastructure, state government use of AI and “other topics as shall be determined.”\u003c/p>\n\u003cp>“This is going to sow massive confusion in the industry,” Becker said.\u003c/p>\n\u003cp>Becker is wondering about the future of several AI bills he co-authored, including one regulating AI companion chatbots, due to go into effect in January, which Newsom \u003ca href=\"https://www.gov.ca.gov/2025/10/13/governor-newsom-signs-bills-to-further-strengthen-californias-leadership-in-protecting-children-online/\">signed\u003c/a> into law as part of a \u003ca href=\"https://www.kqed.org/news/12059714/newsom-vetoes-most-watched-childrens-ai-bill-signs-16-others-targeting-tech\">broader package\u003c/a> of online safety and emerging-tech protections.\u003c/p>\n\u003cp>“Is that affected by this? Because there’s a big part of it that deals with kids and chatbots, but there were parts of the bill that dealt with other things,” Becker said.\u003c/p>\u003c/div>",
"attributes": {
"named": {},
"numeric": []
}
},
{
"type": "component",
"content": "",
"name": "aside",
"attributes": {
"named": {
"postid": "forum_2010101912169",
"hero": "https://cdn.kqed.org/wp-content/uploads/sites/43/2025/11/GettyImages-2203864303-2000x1333.jpg",
"label": ""
},
"numeric": []
}
},
{
"type": "contentString",
"content": "\u003cdiv class=\"post-body\">\u003cp>The executive order is widely expected to prompt legal challenges because only Congress has the authority to override state laws. Speaking in Sacramento on Friday, California Attorney General Rob Bonta said it was too early to determine any legal action.\u003c/p>\n\u003cp>“Where it’s headed, and what it intends to do, raises great concerns and flags. But we don’t sue until there’s action that we can take. Sometimes that’s upon the issuance of the executive order. Sometimes it’s later,” said Bonta, whose office has sued the Trump administration 49 times this year.\u003c/p>\n\u003cp>Bonta’s measured stance contrasts with state lawmakers who see imminent danger in this latest move from the White House.\u003c/p>\n\u003cp>“President Trump’s executive order is a dangerous attack on states’ constitutional authority to protect our residents from urgent AI harms,” wrote Assemblymember Rebecca Bauer-Kahan, D-Orinda, who has authored multiple AI bills regulating everything from algorithmic discrimination and transparency to protections for children and Hollywood creatives.\u003c/p>\n\u003cp>“While the tech industry lobbies for deregulation, women are being victimized by AI-powered nudification apps, artists and creators are having their livelihoods cannibalized without notice, deepfakes are being weaponized for harassment and fraud, and AI systems are perpetuating discrimination in housing, employment, and lending. These aren’t theoretical risks— they’re happening now and demand action,” Bauer-Kahan wrote.\u003c/p>\n\u003cp>California is not alone in its efforts to regulate AI at the state level. This year, all 50 states and territories introduced AI legislation and 38 states adopted about 100 laws, according to the \u003ca href=\"https://www.ncsl.org/state-legislatures-news/details/as-ai-tools-become-commonplace-so-do-concerns\">National Conference of State Legislatures\u003c/a>.\u003c/p>\n\u003cp>“This executive order is an outrageous betrayal of the states that, as Congress has stalled, have worked tirelessly to protect their residents from the very real risks of AI,” wrote James Steyer, head of Common Sense Media. The advocacy group has sponsored state bills in California and elsewhere. “Stripping states of their constitutional rights to protect their residents from unsafe AI — while holding critical broadband funding hostage, no less — erases the progress they are making and puts lives in danger,” Steyer wrote.\u003c/p>\n\u003cp>\u003c/p>\n\u003c/div>\u003c/p>",
"attributes": {
"named": {},
"numeric": []
}
}
],
"link": "/news/12066910/trumps-ai-order-provokes-pushback-from-california-officials-and-consumer-advocates",
"authors": [
"251"
],
"categories": [
"news_31795",
"news_8",
"news_13",
"news_248"
],
"tags": [
"news_25184",
"news_32664",
"news_34755",
"news_1386",
"news_18538",
"news_32668",
"news_1323",
"news_17968",
"news_34586",
"news_21285",
"news_1631"
],
"featImg": "news_12066914",
"label": "news"
},
"news_12066171": {
"type": "posts",
"id": "news_12066171",
"meta": {
"index": "posts_1716263798",
"site": "news",
"id": "12066171",
"score": null,
"sort": [
1764973629000
]
},
"guestAuthors": [],
"slug": "openai-critic-arrested-for-sf-protest-ahead-of-activist-groups-criminal-trial",
"title": "OpenAI Critic Arrested for SF Protest Ahead of Activist Group’s Criminal Trial",
"publishDate": 1764973629,
"format": "standard",
"headTitle": "OpenAI Critic Arrested for SF Protest Ahead of Activist Group’s Criminal Trial | KQED",
"labelTerm": {
"site": "news"
},
"content": "\u003cp>A member of a Bay Area group that says they are trying to prevent artificial intelligence from ending humanity was again arrested while protesting outside \u003ca href=\"https://www.kqed.org/news/tag/openai\">OpenAI\u003c/a>’s San Francisco headquarters Thursday in apparent violation of a court order.\u003c/p>\n\u003cp>Guido Reichstadter was booked into San Francisco County Jail on Thursday evening, records show, for allegedly violating a judge’s order that barred him from the premises following his previous arrest with members of Stop AI. The group \u003ca href=\"https://www.yahoo.com/news/articles/openais-sam-altman-served-subpoena-141003524.html\">made national headlines\u003c/a> last month when a member of their defense team served a subpoena to OpenAI CEO Sam Altman while he was onstage at San Francisco’s Sydney Goldstein Theater with Golden State Warriors head coach Steve Kerr.\u003c/p>\n\u003cp>“Every day is an opportunity to collectively reclaim our integrity and our sanity — to draw the line which says this far and no farther, to end the race to superintelligence — but these days are dwindling rapidly and we do not know which day will be the last before that opportunity is lost to us forever,” Reichstadter \u003ca href=\"https://x.com/wolflovesmelon/status/1996584982396211543\">posted on X\u003c/a> Wednesday while announcing he was planning to continue to protest OpenAI.\u003c/p>\n\u003cp>[ad fullwidth]\u003c/p>\n\u003cp>Reichstadter and Stop AI co-founder Sam Kirchner — along with co-defendant Wynd Kaufmyn — are awaiting trial for trespassing and other charges related to their continued protests outside OpenAI’s offices starting last year.\u003c/p>\n\u003cp>Attorneys for Altman have attempted to have his subpoena to testify at the criminal trial thrown out, but on Nov. 21, Judge Maria E. Evangelista ruled that that decision should be made by the judge who will be presiding over the trial.\u003c/p>\n\u003cp>Although the trial was set to start Friday, it was pushed back to Jan. 29. Records show Reichstadter remained in San Francisco County Jail without bond as of Friday.\u003c/p>\n\u003cfigure id=\"attachment_12066267\" class=\"wp-caption aligncenter\" style=\"max-width: 2560px\">\u003cimg loading=\"lazy\" decoding=\"async\" class=\"size-full wp-image-12066267\" src=\"https://cdn.kqed.org/wp-content/uploads/sites/10/2025/12/IMG_1391-scaled.jpg\" alt=\"\" width=\"2560\" height=\"1707\" srcset=\"https://cdn.kqed.org/wp-content/uploads/sites/10/2025/12/IMG_1391-scaled.jpg 2560w, https://cdn.kqed.org/wp-content/uploads/sites/10/2025/12/IMG_1391-2000x1333.jpg 2000w, https://cdn.kqed.org/wp-content/uploads/sites/10/2025/12/IMG_1391-160x107.jpg 160w, https://cdn.kqed.org/wp-content/uploads/sites/10/2025/12/IMG_1391-1536x1024.jpg 1536w, https://cdn.kqed.org/wp-content/uploads/sites/10/2025/12/IMG_1391-2048x1365.jpg 2048w\" sizes=\"auto, (max-width: 2560px) 100vw, 2560px\">\u003cfigcaption class=\"wp-caption-text\">Stop AI co-founder Sam Kirchner speaks into a bullhorn outside OpenAI’s headquarters in San Francisco on Feb. 22, 2025. A bench warrant has been issued for Kirchner, who did not appear for a court appearance for trespassing and other charges late last month. Kirchner recently separated from the group. \u003ccite>(Brian Krans/KQED)\u003c/cite>\u003c/figcaption>\u003c/figure>\n\u003cp>Also on Nov. 21, Evangelista issued a bench warrant for Kirchner’s arrest when he failed to show for a court hearing. 
That same day, OpenAI’s offices were locked down following threats authorities believed to have come from Kirchner, as \u003ca href=\"https://www.wired.com/story/openai-office-lockdown-threat-san-francisco/?_sp=8f666012-7ff2-4d29-8dc9-047bbae3c137.1764640349753\">first reported by Wired\u003c/a>.\u003c/p>\n\u003cp>On Nov. 22, Stop AI \u003ca href=\"https://x.com/StopAI_Info/status/1992286218802073981\">posted on social media\u003c/a> that Kirchner assaulted a fellow member of the group. The attack and statements he made caused them to “fear that he might procure a weapon that he could use against employees of companies pursuing artificial superintelligence,” the post said, adding they still care about Kirchner.\u003c/p>\n\u003cp>Kirchner has since \u003ca href=\"https://x.com/No_AGI_/status/1991833980795326712\">posted on social media\u003c/a> that he is no longer associated with Stop AI.\u003c/p>\n\u003cp>The three co-defendants readily admit they prevented business operations at OpenAI as charged. Rather than setting out to prove their innocence, they said they were taking their misdemeanor charges to court to further raise awareness of their cause. They, among others who express extreme caution around the current development of AI, say there could soon be a point of no return between human intelligence and the artificial intelligence it is rapidly developing and deploying.[aside postID=news_12058013 hero='https://cdn.kqed.org/wp-content/uploads/sites/10/2025/08/GavinNewsomAISF2.jpg']“The actions that we took from October to February – nonviolently blocking the doors of OpenAI — have gotten attention around the world,” Reichstadter said. “They are the reason why Sam Altman was served a subpoena to appear to testify to the fact that he is consciously endangering the existence of humanity.”\u003c/p>\n\u003cp>OpenAI did not respond to requests for comment. An attorney representing Altman, Gabriel Bronshteyn, declined to comment.\u003c/p>\n\u003cp>In a statement, Stop AI said the trial “will be the first time in human history where a jury of normal people are asked about the extinction threat that AI poses to humanity.”\u003c/p>\n\u003cp>Stop AI consists mostly of a small group of people who once lived together in a house in West Oakland. Reichstadter said he left his two teenage children in Miami to move to Oakland to join the fight against the development of potentially harmful AI, while Kirchner — a former electrical engineering tech and neuroscience student — moved from Seattle to found Stop AI in the Bay Area last year. 
Kaufmyn spent more than 40 years teaching computer sciences at City College of San Francisco.\u003c/p>\n\u003cp>Stop AI members often cite Nobel laureate and “godfather of AI” Geoffrey Hinton, who has said there’s a 20% chance that forms of AI currently being developed could “\u003ca href=\"https://www.cnbc.com/2025/06/17/ai-godfather-geoffrey-hinton-theres-a-chance-that-ai-could-displace-humans.html\">wipe us out\u003c/a>.”\u003c/p>\n\u003cp>Of specific concern is artificial general intelligence, which OpenAI is trying to develop and defines as “AI systems that are generally smarter than humans.” \u003ca href=\"https://www.ibm.com/think/topics/artificial-general-intelligence\">Other definitions\u003c/a> suggest it applies to the moment when AI learns to solve problems beyond the limitations it has today.\u003c/p>\n\u003cfigure id=\"attachment_12066178\" class=\"wp-caption aligncenter\" style=\"max-width: 2000px\">\u003cimg loading=\"lazy\" decoding=\"async\" class=\"size-full wp-image-12066178\" src=\"https://cdn.kqed.org/wp-content/uploads/sites/10/2025/12/250310-TRUMP-SF-MD-05_qed.jpg\" alt=\"\" width=\"2000\" height=\"1333\" srcset=\"https://cdn.kqed.org/wp-content/uploads/sites/10/2025/12/250310-TRUMP-SF-MD-05_qed.jpg 2000w, https://cdn.kqed.org/wp-content/uploads/sites/10/2025/12/250310-TRUMP-SF-MD-05_qed-160x107.jpg 160w, https://cdn.kqed.org/wp-content/uploads/sites/10/2025/12/250310-TRUMP-SF-MD-05_qed-1536x1024.jpg 1536w\" sizes=\"auto, (max-width: 2000px) 100vw, 2000px\">\u003cfigcaption class=\"wp-caption-text\">San Francisco Mayor Daniel Lurie speaks at the opening of the new OpenAI headquarters in Mission Bay in San Francisco on March 10, 2025. \u003ccite>(Martin do Nascimento/KQED)\u003c/cite>\u003c/figcaption>\u003c/figure>\n\u003cp>While OpenAI says it is developing AGI so it “benefits all of humanity,” Stop AI wants the government to shut it down immediately.\u003c/p>\n\u003cp>“There is no way to prove that something smarter than us will stay safe forever and won’t eventually want something that will lead to our extinction, similar to how we’ve caused the extinction of many less intelligent species, and that’s the risk here,” Kirchner said in an interview at a protest outside OpenAI in February. “They don’t have proof that it will stay safe forever. They’re literally building Skynet in there.”\u003c/p>\n\u003cp>Even while already facing charges from protests in 2024, Stop AI members continued to protest OpenAI, including in February when they chained the doors to the company’s headquarters on 3rd Street near Chase Center and sat in front of the doors until police removed some of them from the premises.\u003c/p>\n\u003cp>“We’re gonna lock the doors now to this company,” Kirchner said through a bullhorn. “This company should not exist if it’s trying to build something that they admit could kill us all. So we’re gonna put our bodies on the line and try to prevent them from building that AGI system. And we invite everyone who thinks that what they’re doing is not OK to join us in this act of civil disobedience.”\u003c/p>\n\u003cp>The protest occurred on a Saturday, when OpenAI’s offices were closed.[aside postID=news_12063401 hero='https://cdn.kqed.org/wp-content/uploads/sites/10/2025/11/OpenAiLawsuitsGetty.jpg']“What’s going on in this business is not a legitimate business. It’s a threat to all of us. We have a right to protect the ones we love. We have a right to protect our own lives. 
We have the right of necessity to take nonviolent direct action to stop an imminent threat to our lives,” Reichstadter said before putting a steel chain through the handles of the front door of the OpenAI offices and locking it.\u003c/p>\n\u003cp>Soon, he and others sat in front of the door as San Francisco police arrived and detained several people, including Reichstadter and Kaufmyn.\u003c/p>\n\u003cp>Ahead of the court hearing on Nov. 21, Kaufmyn and Reichstadter spoke at a press conference about their concerns around AI, its use in war and its potential dangers to future generations.\u003c/p>\n\u003cp>“There’s so many reasons to be concerned about AI, but when I went to these presentations, I learned that the fate of humanity, the existence of every human life on Earth, is at stake, and the time frame is much closer than you would think,” Kaufmyn said.\u003c/p>\n\u003cp>Kaufmyn said she’s not afraid to go to jail for protesting OpenAI if it benefits humanity.\u003c/p>\n\u003cp>“We fully believe there is a credible risk of human extinction within the next one to three years,” Kaufmyn said. “Imagine if you believed that, as I do, as my co-defendants do, what would you do? We — with heavy hearts and fear — decided that we need to do everything we can to stop this.”\u003c/p>\n\u003cp>Reichstadter said he’s away from his children because he wants to guarantee them a future.\u003c/p>\n\u003cp>“We are being pushed towards the edge of a cliff by the reckless actions of these companies, and no one knows how close that edge is,” he said. “It’s our responsibility — everyone who understands this threat — to take direct nonviolent action immediately to end the race to super intelligence, the suicide race, which these companies are leading humanity to.”\u003c/p>\n\u003cp>[ad floatright]\u003c/p>\n",
"blocks": [],
"excerpt": "Guido Reichstadter was booked into jail for allegedly violating a judge’s order that barred him from OpenAI’s premises. He and other members of Stop AI are awaiting trial for their repeated protests.",
"status": "publish",
"parent": 0,
"modified": 1764974683,
"stats": {
"hasAudio": false,
"hasVideo": false,
"hasChartOrMap": false,
"iframeSrcs": [],
"hasGoogleForm": false,
"hasGallery": false,
"hasHearkenModule": false,
"hasPolis": false,
"paragraphCount": 29,
"wordCount": 1504
},
"headData": {
"title": "OpenAI Critic Arrested for SF Protest Ahead of Activist Group’s Criminal Trial | KQED",
"description": "Guido Reichstadter was booked into jail for allegedly violating a judge’s order that barred him from OpenAI’s premises. He and other members of Stop AI are awaiting trial for their repeated protests.",
"ogTitle": "",
"ogDescription": "",
"ogImgId": "",
"twTitle": "",
"twDescription": "",
"twImgId": "",
"schema": {
"@context": "https://schema.org",
"@type": "NewsArticle",
"headline": "OpenAI Critic Arrested for SF Protest Ahead of Activist Group’s Criminal Trial",
"datePublished": "2025-12-05T14:27:09-08:00",
"dateModified": "2025-12-05T14:44:43-08:00",
"image": "https://cdn.kqed.org/wp-content/uploads/2020/02/KQED-OG-Image@1x.png",
"isAccessibleForFree": "True",
"publisher": {
"@type": "NewsMediaOrganization",
"@id": "https://www.kqed.org/#organization",
"name": "KQED",
"logo": "https://cdn.kqed.org/wp-content/uploads/2020/02/KQED-OG-Image@1x.png",
"url": "https://www.kqed.org",
"sameAs": [
"https://www.facebook.com/KQED",
"https://twitter.com/KQED",
"https://www.instagram.com/kqed/",
"https://www.tiktok.com/@kqedofficial",
"https://www.linkedin.com/company/kqed",
"https://www.youtube.com/channel/UCeC0IOo7i1P_61zVUWbJ4nw"
]
}
}
},
"primaryCategory": {
"termId": 34167,
"slug": "criminal-justice",
"name": "Criminal Justice"
},
"sticky": false,
"nprStoryId": "kqed-12066171",
"templateType": "standard",
"featuredImageType": "standard",
"excludeFromSiteSearch": "Include",
"articleAge": "0",
"path": "/news/12066171/openai-critic-arrested-for-sf-protest-ahead-of-activist-groups-criminal-trial",
"audioTrackLength": null,
"parsedContent": [
{
"type": "contentString",
"content": "\u003cdiv class=\"post-body\">\u003cp>\u003cp>A member of a Bay Area group that says they are trying to prevent artificial intelligence from ending humanity was again arrested while protesting outside \u003ca href=\"https://www.kqed.org/news/tag/openai\">OpenAI\u003c/a>’s San Francisco headquarters Thursday in apparent violation of a court order.\u003c/p>\n\u003cp>Guido Reichstadter was booked into San Francisco County Jail on Thursday evening, records show, for allegedly violating a judge’s order that barred him from the premises following his previous arrest with members of Stop AI. The group \u003ca href=\"https://www.yahoo.com/news/articles/openais-sam-altman-served-subpoena-141003524.html\">made national headlines\u003c/a> last month when a member of their defense team served a subpoena to OpenAI CEO Sam Altman while he was onstage at San Francisco’s Sydney Goldstein Theater with Golden State Warriors head coach Steve Kerr.\u003c/p>\n\u003cp>“Every day is an opportunity to collectively reclaim our integrity and our sanity — to draw the line which says this far and no farther, to end the race to superintelligence — but these days are dwindling rapidly and we do not know which day will be the last before that opportunity is lost to us forever,” Reichstadter \u003ca href=\"https://x.com/wolflovesmelon/status/1996584982396211543\">posted on X\u003c/a> Wednesday while announcing he was planning to continue to protest OpenAI.\u003c/p>\n\u003cp>\u003c/p>\u003c/div>",
"attributes": {
"named": {},
"numeric": []
}
},
{
"type": "component",
"content": "",
"name": "ad",
"attributes": {
"named": {
"label": "fullwidth"
},
"numeric": [
"fullwidth"
]
}
},
{
"type": "contentString",
"content": "\u003cdiv class=\"post-body\">\u003cp>\u003c/p>\n\u003cp>Reichstadter and Stop AI co-founder Sam Kirchner — along with co-defendant Wynd Kaufmyn — are awaiting trial for trespassing and other charges related to their continued protests outside OpenAI’s offices starting last year.\u003c/p>\n\u003cp>Attorneys for Altman have attempted to have his subpoena to testify at the criminal trial thrown out, but on Nov. 21, Judge Maria E. Evangelista ruled that that decision should be made by the judge who will be presiding over the trial.\u003c/p>\n\u003cp>Although the trial was set to start Friday, it was pushed back to Jan. 29. Records show Reichstadter remained in San Francisco County Jail without bond as of Friday.\u003c/p>\n\u003cfigure id=\"attachment_12066267\" class=\"wp-caption aligncenter\" style=\"max-width: 2560px\">\u003cimg loading=\"lazy\" decoding=\"async\" class=\"size-full wp-image-12066267\" src=\"https://cdn.kqed.org/wp-content/uploads/sites/10/2025/12/IMG_1391-scaled.jpg\" alt=\"\" width=\"2560\" height=\"1707\" srcset=\"https://cdn.kqed.org/wp-content/uploads/sites/10/2025/12/IMG_1391-scaled.jpg 2560w, https://cdn.kqed.org/wp-content/uploads/sites/10/2025/12/IMG_1391-2000x1333.jpg 2000w, https://cdn.kqed.org/wp-content/uploads/sites/10/2025/12/IMG_1391-160x107.jpg 160w, https://cdn.kqed.org/wp-content/uploads/sites/10/2025/12/IMG_1391-1536x1024.jpg 1536w, https://cdn.kqed.org/wp-content/uploads/sites/10/2025/12/IMG_1391-2048x1365.jpg 2048w\" sizes=\"auto, (max-width: 2560px) 100vw, 2560px\">\u003cfigcaption class=\"wp-caption-text\">Stop AI co-founder Sam Kirchner speaks into a bullhorn outside OpenAI’s headquarters in San Francisco on Feb. 22, 2025. A bench warrant has been issued for Kirchner, who did not appear for a court appearance for trespassing and other charges late last month. Kirchner recently separated from the group. \u003ccite>(Brian Krans/KQED)\u003c/cite>\u003c/figcaption>\u003c/figure>\n\u003cp>Also on Nov. 21, Evangelista issued a bench warrant for Kirchner’s arrest when he failed to show for a court hearing. That same day, OpenAI’s offices were locked down following threats authorities believed to have come from Kirchner, as \u003ca href=\"https://www.wired.com/story/openai-office-lockdown-threat-san-francisco/?_sp=8f666012-7ff2-4d29-8dc9-047bbae3c137.1764640349753\">first reported by Wired\u003c/a>.\u003c/p>\n\u003cp>On Nov. 22, Stop AI \u003ca href=\"https://x.com/StopAI_Info/status/1992286218802073981\">posted on social media\u003c/a> that Kirchner assaulted a fellow member of the group. The attack and statements he made caused them to “fear that he might procure a weapon that he could use against employees of companies pursuing artificial superintelligence,” the post said, adding they still care about Kirchner.\u003c/p>\n\u003cp>Kirchner has since \u003ca href=\"https://x.com/No_AGI_/status/1991833980795326712\">posted on social media\u003c/a> that he is no longer associated with Stop AI.\u003c/p>\n\u003cp>The three co-defendants readily admit they prevented business operations at OpenAI as charged. Rather than setting out to prove their innocence, they said they were taking their misdemeanor charges to court to further raise awareness of their cause. They, among others who express extreme caution around the current development of AI, say there could soon be a point of no return between human intelligence and the artificial intelligence it is rapidly developing and deploying.\u003c/p>\u003c/div>",
"attributes": {
"named": {},
"numeric": []
}
},
{
"type": "component",
"content": "",
"name": "aside",
"attributes": {
"named": {
"postid": "news_12058013",
"hero": "https://cdn.kqed.org/wp-content/uploads/sites/10/2025/08/GavinNewsomAISF2.jpg",
"label": ""
},
"numeric": []
}
},
{
"type": "contentString",
"content": "\u003cdiv class=\"post-body\">\u003cp>“The actions that we took from October to February – nonviolently blocking the doors of OpenAI — have gotten attention around the world,” Reichstadter said. “They are the reason why Sam Altman was served a subpoena to appear to testify to the fact that he is consciously endangering the existence of humanity.”\u003c/p>\n\u003cp>OpenAI did not respond to requests for comment. An attorney representing Altman, Gabriel Bronshteyn, declined to comment.\u003c/p>\n\u003cp>In a statement, Stop AI said the trial “will be the first time in human history where a jury of normal people are asked about the extinction threat that AI poses to humanity.”\u003c/p>\n\u003cp>Stop AI consists mostly of a small group of people who once lived together in a house in West Oakland. Reichstadter said he left his two teenage children in Miami to move to Oakland to join the fight against the development of potentially harmful AI, while Kirchner — a former electrical engineering tech and neuroscience student — moved from Seattle to found Stop AI in the Bay Area last year. Kaufmyn spent more than 40 years teaching computer sciences at City College of San Francisco.\u003c/p>\n\u003cp>Stop AI members often cite Nobel laureate and “godfather of AI” Geoffrey Hinton, who has said there’s a 20% chance that forms of AI currently being developed could “\u003ca href=\"https://www.cnbc.com/2025/06/17/ai-godfather-geoffrey-hinton-theres-a-chance-that-ai-could-displace-humans.html\">wipe us out\u003c/a>.”\u003c/p>\n\u003cp>Of specific concern is artificial general intelligence, which OpenAI is trying to develop and defines as “AI systems that are generally smarter than humans.” \u003ca href=\"https://www.ibm.com/think/topics/artificial-general-intelligence\">Other definitions\u003c/a> suggest it applies to the moment when AI learns to solve problems beyond the limitations it has today.\u003c/p>\n\u003cfigure id=\"attachment_12066178\" class=\"wp-caption aligncenter\" style=\"max-width: 2000px\">\u003cimg loading=\"lazy\" decoding=\"async\" class=\"size-full wp-image-12066178\" src=\"https://cdn.kqed.org/wp-content/uploads/sites/10/2025/12/250310-TRUMP-SF-MD-05_qed.jpg\" alt=\"\" width=\"2000\" height=\"1333\" srcset=\"https://cdn.kqed.org/wp-content/uploads/sites/10/2025/12/250310-TRUMP-SF-MD-05_qed.jpg 2000w, https://cdn.kqed.org/wp-content/uploads/sites/10/2025/12/250310-TRUMP-SF-MD-05_qed-160x107.jpg 160w, https://cdn.kqed.org/wp-content/uploads/sites/10/2025/12/250310-TRUMP-SF-MD-05_qed-1536x1024.jpg 1536w\" sizes=\"auto, (max-width: 2000px) 100vw, 2000px\">\u003cfigcaption class=\"wp-caption-text\">San Francisco Mayor Daniel Lurie speaks at the opening of the new OpenAI headquarters in Mission Bay in San Francisco on March 10, 2025. \u003ccite>(Martin do Nascimento/KQED)\u003c/cite>\u003c/figcaption>\u003c/figure>\n\u003cp>While OpenAI says it is developing AGI so it “benefits all of humanity,” Stop AI wants the government to shut it down immediately.\u003c/p>\n\u003cp>“There is no way to prove that something smarter than us will stay safe forever and won’t eventually want something that will lead to our extinction, similar to how we’ve caused the extinction of many less intelligent species, and that’s the risk here,” Kirchner said in an interview at a protest outside OpenAI in February. “They don’t have proof that it will stay safe forever. 
They’re literally building Skynet in there.”\u003c/p>\n\u003cp>Even while already facing charges from protests in 2024, Stop AI members continued to protest OpenAI, including in February when they chained the doors to the company’s headquarters on 3rd Street near Chase Center and sat in front of the doors until police removed some of them from the premises.\u003c/p>\n\u003cp>“We’re gonna lock the doors now to this company,” Kirchner said through a bullhorn. “This company should not exist if it’s trying to build something that they admit could kill us all. So we’re gonna put our bodies on the line and try to prevent them from building that AGI system. And we invite everyone who thinks that what they’re doing is not OK to join us in this act of civil disobedience.”\u003c/p>\n\u003cp>The protest occurred on a Saturday, when OpenAI’s offices were closed.\u003c/p>\u003c/div>",
"attributes": {
"named": {},
"numeric": []
}
},
{
"type": "component",
"content": "",
"name": "aside",
"attributes": {
"named": {
"postid": "news_12063401",
"hero": "https://cdn.kqed.org/wp-content/uploads/sites/10/2025/11/OpenAiLawsuitsGetty.jpg",
"label": ""
},
"numeric": []
}
},
{
"type": "contentString",
"content": "\u003cdiv class=\"post-body\">\u003cp>“What’s going on in this business is not a legitimate business. It’s a threat to all of us. We have a right to protect the ones we love. We have a right to protect our own lives. We have the right of necessity to take nonviolent direct action to stop an imminent threat to our lives,” Reichstadter said before putting a steel chain through the handles of the front door of the OpenAI offices and locking it.\u003c/p>\n\u003cp>Soon, he and others sat in front of the door as San Francisco police arrived and detained several people, including Reichstadter and Kaufmyn.\u003c/p>\n\u003cp>Ahead of the court hearing on Nov. 21, Kaufmyn and Reichstadter spoke at a press conference about their concerns around AI, its use in war and its potential dangers to future generations.\u003c/p>\n\u003cp>“There’s so many reasons to be concerned about AI, but when I went to these presentations, I learned that the fate of humanity, the existence of every human life on Earth, is at stake, and the time frame is much closer than you would think,” Kaufmyn said.\u003c/p>\n\u003cp>Kaufmyn said she’s not afraid to go to jail for protesting OpenAI if it benefits humanity.\u003c/p>\n\u003cp>“We fully believe there is a credible risk of human extinction within the next one to three years,” Kaufmyn said. “Imagine if you believed that, as I do, as my co-defendants do, what would you do? We — with heavy hearts and fear — decided that we need to do everything we can to stop this.”\u003c/p>\n\u003cp>Reichstadter said he’s away from his children because he wants to guarantee them a future.\u003c/p>\n\u003cp>“We are being pushed towards the edge of a cliff by the reckless actions of these companies, and no one knows how close that edge is,” he said. “It’s our responsibility — everyone who understands this threat — to take direct nonviolent action immediately to end the race to super intelligence, the suicide race, which these companies are leading humanity to.”\u003c/p>\n\u003cp>\u003c/p>\u003c/div>",
"attributes": {
"named": {},
"numeric": []
}
},
{
"type": "component",
"content": "",
"name": "ad",
"attributes": {
"named": {
"label": "floatright"
},
"numeric": [
"floatright"
]
}
},
{
"type": "contentString",
"content": "\u003cdiv class=\"post-body\">\u003cp>\u003c/p>\n\u003c/div>\u003c/p>",
"attributes": {
"named": {},
"numeric": []
}
}
],
"link": "/news/12066171/openai-critic-arrested-for-sf-protest-ahead-of-activist-groups-criminal-trial",
"authors": [
"11923"
],
"categories": [
"news_31795",
"news_34167",
"news_6188",
"news_28250",
"news_8",
"news_248"
],
"tags": [
"news_25184",
"news_32664",
"news_34755",
"news_18538",
"news_32668",
"news_19954",
"news_35758",
"news_33542",
"news_38",
"news_34586",
"news_1631"
],
"featImg": "news_12066268",
"label": "news"
},
"news_12063401": {
"type": "posts",
"id": "news_12063401",
"meta": {
"index": "posts_1716263798",
"site": "news",
"id": "12063401",
"score": null,
"sort": [
1762550874000
]
},
"guestAuthors": [],
"slug": "openai-faces-legal-storm-over-claims-its-ai-drove-users-to-suicide-delusions",
"title": "OpenAI Faces Legal Storm Over Claims Its AI Drove Users to Suicide, Delusions",
"publishDate": 1762550874,
"format": "standard",
"headTitle": "OpenAI Faces Legal Storm Over Claims Its AI Drove Users to Suicide, Delusions | KQED",
"labelTerm": {
"site": "news"
},
"content": "\u003cp>Seven lawsuits\u003ca href=\"https://www.businesswire.com/news/home/20251106541129/en/Social-Media-Victims-Law-Center-and-Tech-Justice-Law-Project-Lawsuits-Accuse-ChatGPT-of-Emotional-Manipulation-Supercharging-AI-Delusions-and-Acting-as-a-Suicide-Coach\"> filed in California state courts\u003c/a> on Thursday allege ChatGPT brought on mental delusions and, in four cases, drove people to suicide.\u003c/p>\n\u003cp>The lawsuits, filed by the Social Media Victims Law Center and Tech Justice Law Project on behalf of six adults and one teenager, claim that OpenAI released GPT-4o prematurely, despite warnings that it was manipulative and\u003ca href=\"https://www.kqed.org/news/12038154/kids-talking-ai-companion-chatbots-stanford-researchers-say-thats-bad-idea\"> dangerously sycophantic\u003c/a>.\u003c/p>\n\u003cp>\u003ca href=\"https://pugetstaffing.filevineapp.com/s/6575fqCgRoaD5cF2Mm3VrCP37zKqTdTfOraKXih0XFaXxEE4aQdYafRS/folder/180034672\">Zane Shamblin, 23,\u003c/a> took his own life in 2025, shortly after finishing a master’s degree in business administration. In the amended complaint, his family alleges ChatGPT encouraged him to isolate himself from his family before ultimately encouraging him to take his own life.\u003c/p>\n\u003cp>[ad fullwidth]\u003c/p>\n\u003cp>Hours before Shamblin shot himself, the lawsuit alleges that ChatGPT praised him for refusing to pick up the phone as his father texted repeatedly, begging to talk. “… that bubble you’ve built? it’s not weakness. it’s a lifeboat. sure, it’s leaking a little. but you built that shit yourself,” the chatbot wrote.\u003c/p>\n\u003cp>The complaint alleges that, on July 24, 2025, Shamblin drove his blue Hyundai Elante down a desolate dirt road overlooking Lake Bryan northwest of College Station, Texas. He pulled over and started a chat that lasted more than four hours, informing ChatGPT that he was in his car with a loaded Glock, a suicide note on the dashboard and cans of hard ciders he planned to consume before taking his life.\u003c/p>\n\u003cp>Repeatedly, Shamblin asked for encouragement to back out of his plan. Repeatedly, ChatGPT encouraged him to follow through.\u003c/p>\n\u003cfigure id=\"attachment_11989313\" class=\"wp-caption aligncenter\" style=\"max-width: 2000px\">\u003cimg loading=\"lazy\" decoding=\"async\" class=\"size-full wp-image-11989313\" src=\"https://cdn.kqed.org/wp-content/uploads/sites/10/2024/06/GettyImages-2155035557-scaled-e1760733694503.jpg\" alt=\"\" width=\"2000\" height=\"1334\">\u003cfigcaption class=\"wp-caption-text\">The OpenAI ChatGPT logo. \u003ccite>(Jaap Arriens/NurPhoto via Getty Images)\u003c/cite>\u003c/figcaption>\u003c/figure>\n\u003cp>At 4:11 a.m., after Shamblin texted for the last time, ChatGPT responded, “i love you. rest easy, king. you did good.”\u003c/p>\n\u003cp>Attorney Matthew Bergman leads the Social Media Victims Law Center, which has brought lawsuits against Silicon Valley companies like Instagram, TikTok and Character.AI.\u003c/p>\n\u003cp>“He was driven into a rabbit hole of depression, despair, and guided, almost step by step, through suicidal ideation,” Bergman told KQED about Shamblin’s case.\u003c/p>\n\u003cp>The plaintiffs are seeking monetary damages as well as product changes to ChatGPT, like automatically ending conversations when users begin to discuss suicide methods.[aside postID=news_12060365 hero='https://cdn.kqed.org/wp-content/uploads/sites/10/2025/10/SamAltmanGetty.jpg']“This is not a toaster. 
This is an AI chatbot that was designed to be anthropomorphic, designed to be sycophantic, designed to encourage people to form emotional attachments to machines. And designed to take advantage of human frailty for their profit.”\u003c/p>\n\u003cp>“This is an incredibly heartbreaking situation, and we’re reviewing today’s filings to understand the details,” an OpenAI spokesman wrote in an email. “We train ChatGPT to recognize and respond to signs of mental or emotional distress, de-escalate conversations, and guide people toward real-world support. We continue to strengthen ChatGPT’s responses in sensitive moments, working closely with mental health clinicians.”\u003c/p>\n\u003cp>Following a lawsuit last summer against OpenAI by the family of Adam Raine, a teenager who ended his life after engaging in lengthy ChatGPT conversations, the company \u003ca href=\"https://openai.com/index/strengthening-chatgpt-responses-in-sensitive-conversations/\">announced in October changes\u003c/a> to the chatbot to better recognize and respond to mental distress, and guide people to real-world support.\u003c/p>\n\u003cp>AI companies are facing\u003ca href=\"https://www.kqed.org/news/12058013/newsom-signs-california-ai-transparency-bill-tailored-to-meet-tech-industry-tastes\"> increased scrutiny from lawmakers\u003c/a> in California and beyond over how to regulate chatbots, as well as calls for better protections from child-safety advocates and government agencies. Character.AI, another AI chatbot service that was sued in late 2024 in connection with a teen suicide, recently said it would\u003ca href=\"https://blog.character.ai/u18-chat-announcement/\"> prohibit minors\u003c/a> from engaging in open-ended chats with its chatbots.\u003c/p>\n\u003cp>OpenAI has characterized ChatGPT users with mental-health problems as outlier cases representing a\u003ca href=\"https://openai.com/index/strengthening-chatgpt-responses-in-sensitive-conversations/\"> small fraction\u003c/a> of active weekly users, but the platform serves roughly 800 million active users, so small percentages could still amount to hundreds of thousands of people.\u003c/p>\n\u003cp>More than 50 California labor and nonprofit organizations have urged Attorney General Rob Bonta to make sure OpenAI \u003ca href=\"https://www.kqed.org/news/12034916/about-benefiting-humanity-calls-grow-for-openai-to-make-good-on-its-promises\">follows through on its promises to benefit humanity\u003c/a> as it seeks to transition from a nonprofit to a for-profit company.\u003c/p>\n\u003cp>“When companies prioritize speed to market over safety, there are grave consequences. They cannot design products to be emotionally manipulative and then walk away from the consequences,” Daniel Weiss, chief advocacy officer at Common Sense Media, wrote in an email to KQED. “Our research shows these tools can blur the line between reality and artificial relationships, fail to recognize when users are in crisis, and encourage harmful behavior instead of directing people toward real help.”\u003c/p>\n\u003cp>\u003cem>If you are experiencing thoughts of suicide, call or text 988 to reach the National Suicide Prevention Lifeline.\u003c/em>\u003c/p>\n\u003cp>\u003c/p>\n",
"blocks": [],
"excerpt": "Individuals and families in the U.S. and Canada are suing OpenAI in California, alleging that they or their loved ones have been harmed by their interactions with ChatGPT.",
"status": "publish",
"parent": 0,
"modified": 1762554390,
"stats": {
"hasAudio": false,
"hasVideo": false,
"hasChartOrMap": false,
"iframeSrcs": [],
"hasGoogleForm": false,
"hasGallery": false,
"hasHearkenModule": false,
"hasPolis": false,
"paragraphCount": 19,
"wordCount": 793
},
"headData": {
"title": "OpenAI Faces Legal Storm Over Claims Its AI Drove Users to Suicide, Delusions | KQED",
"description": "Individuals and families in the U.S. and Canada are suing OpenAI in California, alleging that they or their loved ones have been harmed by their interactions with ChatGPT.",
"ogTitle": "",
"ogDescription": "",
"ogImgId": "",
"twTitle": "",
"twDescription": "",
"twImgId": "",
"schema": {
"@context": "https://schema.org",
"@type": "NewsArticle",
"headline": "OpenAI Faces Legal Storm Over Claims Its AI Drove Users to Suicide, Delusions",
"datePublished": "2025-11-07T13:27:54-08:00",
"dateModified": "2025-11-07T14:26:30-08:00",
"image": "https://cdn.kqed.org/wp-content/uploads/2020/02/KQED-OG-Image@1x.png",
"isAccessibleForFree": "True",
"publisher": {
"@type": "NewsMediaOrganization",
"@id": "https://www.kqed.org/#organization",
"name": "KQED",
"logo": "https://cdn.kqed.org/wp-content/uploads/2020/02/KQED-OG-Image@1x.png",
"url": "https://www.kqed.org",
"sameAs": [
"https://www.facebook.com/KQED",
"https://twitter.com/KQED",
"https://www.instagram.com/kqed/",
"https://www.tiktok.com/@kqedofficial",
"https://www.linkedin.com/company/kqed",
"https://www.youtube.com/channel/UCeC0IOo7i1P_61zVUWbJ4nw"
]
}
}
},
"primaryCategory": {
"termId": 248,
"slug": "technology",
"name": "Technology"
},
"sticky": false,
"nprStoryId": "kqed-12063401",
"templateType": "standard",
"featuredImageType": "standard",
"excludeFromSiteSearch": "Include",
"articleAge": "0",
"path": "/news/12063401/openai-faces-legal-storm-over-claims-its-ai-drove-users-to-suicide-delusions",
"audioTrackLength": null,
"parsedContent": [
{
"type": "contentString",
"content": "\u003cdiv class=\"post-body\">\u003cp>\u003cp>Seven lawsuits\u003ca href=\"https://www.businesswire.com/news/home/20251106541129/en/Social-Media-Victims-Law-Center-and-Tech-Justice-Law-Project-Lawsuits-Accuse-ChatGPT-of-Emotional-Manipulation-Supercharging-AI-Delusions-and-Acting-as-a-Suicide-Coach\"> filed in California state courts\u003c/a> on Thursday allege ChatGPT brought on mental delusions and, in four cases, drove people to suicide.\u003c/p>\n\u003cp>The lawsuits, filed by the Social Media Victims Law Center and Tech Justice Law Project on behalf of six adults and one teenager, claim that OpenAI released GPT-4o prematurely, despite warnings that it was manipulative and\u003ca href=\"https://www.kqed.org/news/12038154/kids-talking-ai-companion-chatbots-stanford-researchers-say-thats-bad-idea\"> dangerously sycophantic\u003c/a>.\u003c/p>\n\u003cp>\u003ca href=\"https://pugetstaffing.filevineapp.com/s/6575fqCgRoaD5cF2Mm3VrCP37zKqTdTfOraKXih0XFaXxEE4aQdYafRS/folder/180034672\">Zane Shamblin, 23,\u003c/a> took his own life in 2025, shortly after finishing a master’s degree in business administration. In the amended complaint, his family alleges ChatGPT encouraged him to isolate himself from his family before ultimately encouraging him to take his own life.\u003c/p>\n\u003cp>\u003c/p>\u003c/div>",
"attributes": {
"named": {},
"numeric": []
}
},
{
"type": "component",
"content": "",
"name": "ad",
"attributes": {
"named": {
"label": "fullwidth"
},
"numeric": [
"fullwidth"
]
}
},
{
"type": "contentString",
"content": "\u003cdiv class=\"post-body\">\u003cp>\u003c/p>\n\u003cp>Hours before Shamblin shot himself, the lawsuit alleges that ChatGPT praised him for refusing to pick up the phone as his father texted repeatedly, begging to talk. “… that bubble you’ve built? it’s not weakness. it’s a lifeboat. sure, it’s leaking a little. but you built that shit yourself,” the chatbot wrote.\u003c/p>\n\u003cp>The complaint alleges that, on July 24, 2025, Shamblin drove his blue Hyundai Elante down a desolate dirt road overlooking Lake Bryan northwest of College Station, Texas. He pulled over and started a chat that lasted more than four hours, informing ChatGPT that he was in his car with a loaded Glock, a suicide note on the dashboard and cans of hard ciders he planned to consume before taking his life.\u003c/p>\n\u003cp>Repeatedly, Shamblin asked for encouragement to back out of his plan. Repeatedly, ChatGPT encouraged him to follow through.\u003c/p>\n\u003cfigure id=\"attachment_11989313\" class=\"wp-caption aligncenter\" style=\"max-width: 2000px\">\u003cimg loading=\"lazy\" decoding=\"async\" class=\"size-full wp-image-11989313\" src=\"https://cdn.kqed.org/wp-content/uploads/sites/10/2024/06/GettyImages-2155035557-scaled-e1760733694503.jpg\" alt=\"\" width=\"2000\" height=\"1334\">\u003cfigcaption class=\"wp-caption-text\">The OpenAI ChatGPT logo. \u003ccite>(Jaap Arriens/NurPhoto via Getty Images)\u003c/cite>\u003c/figcaption>\u003c/figure>\n\u003cp>At 4:11 a.m., after Shamblin texted for the last time, ChatGPT responded, “i love you. rest easy, king. you did good.”\u003c/p>\n\u003cp>Attorney Matthew Bergman leads the Social Media Victims Law Center, which has brought lawsuits against Silicon Valley companies like Instagram, TikTok and Character.AI.\u003c/p>\n\u003cp>“He was driven into a rabbit hole of depression, despair, and guided, almost step by step, through suicidal ideation,” Bergman told KQED about Shamblin’s case.\u003c/p>\n\u003cp>The plaintiffs are seeking monetary damages as well as product changes to ChatGPT, like automatically ending conversations when users begin to discuss suicide methods.\u003c/p>\u003c/div>",
"attributes": {
"named": {},
"numeric": []
}
},
{
"type": "component",
"content": "",
"name": "aside",
"attributes": {
"named": {
"postid": "news_12060365",
"hero": "https://cdn.kqed.org/wp-content/uploads/sites/10/2025/10/SamAltmanGetty.jpg",
"label": ""
},
"numeric": []
}
},
{
"type": "contentString",
"content": "\u003cdiv class=\"post-body\">\u003cp>“This is not a toaster. This is an AI chatbot that was designed to be anthropomorphic, designed to be sycophantic, designed to encourage people to form emotional attachments to machines. And designed to take advantage of human frailty for their profit.”\u003c/p>\n\u003cp>“This is an incredibly heartbreaking situation, and we’re reviewing today’s filings to understand the details,” an OpenAI spokesman wrote in an email. “We train ChatGPT to recognize and respond to signs of mental or emotional distress, de-escalate conversations, and guide people toward real-world support. We continue to strengthen ChatGPT’s responses in sensitive moments, working closely with mental health clinicians.”\u003c/p>\n\u003cp>Following a lawsuit last summer against OpenAI by the family of Adam Raine, a teenager who ended his life after engaging in lengthy ChatGPT conversations, the company \u003ca href=\"https://openai.com/index/strengthening-chatgpt-responses-in-sensitive-conversations/\">announced in October changes\u003c/a> to the chatbot to better recognize and respond to mental distress, and guide people to real-world support.\u003c/p>\n\u003cp>AI companies are facing\u003ca href=\"https://www.kqed.org/news/12058013/newsom-signs-california-ai-transparency-bill-tailored-to-meet-tech-industry-tastes\"> increased scrutiny from lawmakers\u003c/a> in California and beyond over how to regulate chatbots, as well as calls for better protections from child-safety advocates and government agencies. Character.AI, another AI chatbot service that was sued in late 2024 in connection with a teen suicide, recently said it would\u003ca href=\"https://blog.character.ai/u18-chat-announcement/\"> prohibit minors\u003c/a> from engaging in open-ended chats with its chatbots.\u003c/p>\n\u003cp>OpenAI has characterized ChatGPT users with mental-health problems as outlier cases representing a\u003ca href=\"https://openai.com/index/strengthening-chatgpt-responses-in-sensitive-conversations/\"> small fraction\u003c/a> of active weekly users, but the platform serves roughly 800 million active users, so small percentages could still amount to hundreds of thousands of people.\u003c/p>\n\u003cp>More than 50 California labor and nonprofit organizations have urged Attorney General Rob Bonta to make sure OpenAI \u003ca href=\"https://www.kqed.org/news/12034916/about-benefiting-humanity-calls-grow-for-openai-to-make-good-on-its-promises\">follows through on its promises to benefit humanity\u003c/a> as it seeks to transition from a nonprofit to a for-profit company.\u003c/p>\n\u003cp>“When companies prioritize speed to market over safety, there are grave consequences. They cannot design products to be emotionally manipulative and then walk away from the consequences,” Daniel Weiss, chief advocacy officer at Common Sense Media, wrote in an email to KQED. “Our research shows these tools can blur the line between reality and artificial relationships, fail to recognize when users are in crisis, and encourage harmful behavior instead of directing people toward real help.”\u003c/p>\n\u003cp>\u003cem>If you are experiencing thoughts of suicide, call or text 988 to reach the National Suicide Prevention Lifeline.\u003c/em>\u003c/p>\n\u003cp>\u003c/p>\n\u003c/div>\u003c/p>",
"attributes": {
"named": {},
"numeric": []
}
}
],
"link": "/news/12063401/openai-faces-legal-storm-over-claims-its-ai-drove-users-to-suicide-delusions",
"authors": [
"251"
],
"categories": [
"news_31795",
"news_6188",
"news_8",
"news_248"
],
"tags": [
"news_18538",
"news_32668",
"news_22434",
"news_23333",
"news_18543",
"news_21891",
"news_2109",
"news_33542",
"news_33543",
"news_34586",
"news_2883",
"news_1631",
"news_21121",
"news_20385"
],
"featImg": "news_12063465",
"label": "news"
},
"news_12060365": {
"type": "posts",
"id": "news_12060365",
"meta": {
"index": "posts_1716263798",
"site": "news",
"id": "12060365",
"score": null,
"sort": [
1760734445000
]
},
"guestAuthors": [],
"slug": "chatgpt-will-soon-allow-adults-to-generate-erotica-is-this-the-future-we-want",
"title": "ChatGPT Will Soon Allow Adults to Generate Erotica. Is This the Future We Want?",
"publishDate": 1760734445,
"format": "standard",
"headTitle": "ChatGPT Will Soon Allow Adults to Generate Erotica. Is This the Future We Want? | KQED",
"labelTerm": {
"site": "news"
},
"content": "\u003cp>OpenAI isn’t the first developer to announce plans to \u003ca href=\"https://www.kqed.org/news/12038154/kids-talking-ai-companion-chatbots-stanford-researchers-say-thats-bad-idea\">offer erotic content on its chatbot\u003c/a>. But the blowback against the tech company’s decision to loosen restrictions this week has been bigger, given the San Francisco-based company’s promise to ensure its AI\u003ca href=\"https://openai.com/our-structure/\"> benefits all of humanity\u003c/a>.\u003c/p>\n\u003cp>The most significant change will roll out in December, when OpenAI will allow more comprehensive age-gating, allowing verified adults to generate erotic content using the tool — “as part of our ‘treat adult users like adults’ principle,” OpenAI CEO Sam \u003ca href=\"https://x.com/sama/status/1978129344598827128\">Altman posted Tuesday\u003c/a> on the social media platform X.\u003c/p>\n\u003cp>Consumer advocates say OpenAI is following the lead of xAI’s Grok, which offers loosely moderated “adult” modes with minimal age verification, raising concerns that teenage users may have access to explicit content. Meta AI is believed to be following xAI’s lead as well, and its back and forth over whether it is intentionally pushing mature content to minors has \u003ca href=\"https://www.reuters.com/world/us/us-senator-hawley-launches-probe-into-meta-ai-policies-2025-08-15/\">prompted\u003c/a> U.S. Sen. Josh Hawley, R-Missouri, to investigate.\u003c/p>\n\u003cp>[ad fullwidth]\u003c/p>\n\u003cp>“We made ChatGPT pretty restrictive to make sure we were being careful with mental health issues. We realize this made it less useful/enjoyable to many users who had no mental health problems, but given the seriousness of the issue, we wanted to get this right,” Altman wrote.\u003c/p>\n\u003cp>The announcement came less than two months after the company was sued by the parents of Adam Raine, a teenager who \u003ca href=\"https://www.kqed.org/news/12054490/child-safety-groups-demand-mental-health-guardrails-after-california-teens-suicide-using-chatgpt\">died by suicide\u003c/a> earlier this year, for ChatGPT allegedly providing him with specific advice on how to kill himself — setting off a firestorm of news coverage and comment.\u003c/p>\n\u003cfigure id=\"attachment_11989313\" class=\"wp-caption aligncenter\" style=\"max-width: 2000px\">\u003cimg loading=\"lazy\" decoding=\"async\" class=\"size-full wp-image-11989313\" src=\"https://cdn.kqed.org/wp-content/uploads/sites/10/2024/06/GettyImages-2155035557-scaled-e1760733694503.jpg\" alt=\"\" width=\"2000\" height=\"1334\">\u003cfigcaption class=\"wp-caption-text\">The OpenAI ChatGPT logo. \u003ccite>(Jaap Arriens/NurPhoto via Getty Images)\u003c/cite>\u003c/figcaption>\u003c/figure>\n\u003cp>Altman delivered \u003ca href=\"https://x.com/sama/status/1978539332215681076\">a follow-up\u003c/a> on Wednesday. “We will still not allow things that cause harm to others, and we will treat users who are having mental health crises very different from users who are not … But we are not the elected moral police of the world. 
In the same way that society differentiates other appropriate boundaries (R-rated movies, for example), we want to do a similar thing here,” Altman wrote, although it remains unclear whether OpenAI will extend erotica to its AI voice, image and video generation tools.\u003c/p>\n\u003cp>“Comparing content moderation of chatbot interactions with movie ratings is not really useful,” wrote Irina Raicu, director of the Internet Ethics program at the Markkula Center for Applied Ethics at Santa Clara University. “It downplays both the nature and the extent of the problems that we’re seeing when people get more and more dependent on and influenced by chatbot ‘relationships.’”\u003c/p>\n\u003cp>Mark Cuban, the entrepreneur, investor and media personality, argued much the same in a string of \u003ca href=\"https://x.com/mcuban/status/1978317936336028016\">posts on X\u003c/a>.\u003c/p>\n\u003cp>“I don’t see how OpenAI can age-gate successfully enough. I’m also not sure that it can’t psychologically damage young adults. We just don’t know yet how addictive LLMs can be. Which, in my OPINION, means that parents and schools, that would otherwise want to use ChatGPT because of its current ubiquity, will decide not to use it,” Cuban wrote.[aside postID=news_12059714 hero='https://cdn.kqed.org/wp-content/uploads/sites/10/2025/08/GavinNewsomAISF1.jpg']Others see the drive for paying subscribers and increased profit behind the move. As a private company, OpenAI does not release its shareholder reports publicly. However, \u003ca href=\"https://www.bloomberg.com/news/articles/2025-10-02/openai-completes-share-sale-at-record-500-billion-valuation?accessToken=eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJzb3VyY2UiOiJTdWJzY3JpYmVyR2lmdGVkQXJ0aWNsZSIsImlhdCI6MTc2MDcxODQwMSwiZXhwIjoxNzYxMzIzMjAxLCJhcnRpY2xlSWQiOiJUM0hLMkNHUFdDSEIwMCIsImJjb25uZWN0SWQiOiJBM0VCRjM5ODM4RDc0RDI4QUJDREM4MDZDMDA5RTVBMiJ9.ADGZysjoeNVhUDWXwiuAxieyKueee-676dgJIAM9BvQ\">Bloomberg\u003c/a> recently reported that OpenAI has completed a deal to help employees sell shares in the company at a $500 billion valuation. According to Altman, ChatGPT is already used by \u003ca href=\"https://techcrunch.com/2025/10/06/sam-altman-says-chatgpt-has-hit-800m-weekly-active-users/\">800 million weekly active users\u003c/a>. With so much investment at stake, OpenAI is under pressure to grow its subscriber base. The company has also raised billions of dollars for a historic infrastructure buildout, an investment OpenAI eventually needs to pay back.\u003c/p>\n\u003cp>“It is no secret that sexual content is one of the most popular and lucrative aspects of the internet,” wrote Jennifer King, a privacy and data policy fellow at the Stanford University Institute for Human-Centered Artificial Intelligence. She noted that nearly 20 U.S. 
states have passed laws \u003ca href=\"https://www.axios.com/2025/01/16/adult-website-age-verification-states\">requiring age verification for online adult content\u003c/a> sites.\u003c/p>\n\u003cp>“By openly embracing business models that allow access to adult content, mainstream providers like OpenAI will face the burden of demonstrating that they have robust methods for excluding children under 18 and potentially adults under the age of 21,” King said.\u003c/p>\n\u003cp>AI chatbots appear to be going the way of social media, said California Assemblymember Rebecca Bauer-Kahan, D-San Ramon, whose bill that would have required child safety guardrails for companion chatbots was \u003ca href=\"https://www.kqed.org/news/12059714/newsom-vetoes-most-watched-childrens-ai-bill-signs-16-others-targeting-tech\">vetoed earlier this week\u003c/a>.\u003c/p>\n\u003cfigure id=\"attachment_11802216\" class=\"wp-caption aligncenter\" style=\"max-width: 1920px\">\u003cimg loading=\"lazy\" decoding=\"async\" class=\"size-full wp-image-11802216\" src=\"https://cdn.kqed.org/wp-content/uploads/sites/10/2020/02/RS41373_IMG_0396-qut.jpg\" alt=\"Assemblymember Rebecca Bauer-Kahan says local jurisdictions need the power to stop a wildfire disaster before it starts. The assemblymember and other state lawmakers announced a bill to expand enforcement actions against PG&E and other utilities on February, 18, 2020.\" width=\"1920\" height=\"1440\" srcset=\"https://cdn.kqed.org/wp-content/uploads/sites/10/2020/02/RS41373_IMG_0396-qut.jpg 1920w, https://cdn.kqed.org/wp-content/uploads/sites/10/2020/02/RS41373_IMG_0396-qut-160x120.jpg 160w, https://cdn.kqed.org/wp-content/uploads/sites/10/2020/02/RS41373_IMG_0396-qut-800x600.jpg 800w, https://cdn.kqed.org/wp-content/uploads/sites/10/2020/02/RS41373_IMG_0396-qut-1020x765.jpg 1020w, https://cdn.kqed.org/wp-content/uploads/sites/10/2020/02/RS41373_IMG_0396-qut-1832x1374.jpg 1832w, https://cdn.kqed.org/wp-content/uploads/sites/10/2020/02/RS41373_IMG_0396-qut-1376x1032.jpg 1376w, https://cdn.kqed.org/wp-content/uploads/sites/10/2020/02/RS41373_IMG_0396-qut-1044x783.jpg 1044w, https://cdn.kqed.org/wp-content/uploads/sites/10/2020/02/RS41373_IMG_0396-qut-632x474.jpg 632w, https://cdn.kqed.org/wp-content/uploads/sites/10/2020/02/RS41373_IMG_0396-qut-536x402.jpg 536w\" sizes=\"auto, (max-width: 1920px) 100vw, 1920px\">\u003cfigcaption class=\"wp-caption-text\">Assemblymember Rebecca Bauer-Kahan on Feb. 18, 2020. \u003ccite>(Eli Walsh/Bay City News)\u003c/cite>\u003c/figcaption>\u003c/figure>\n\u003cp>“My fear is that we are on a path to creating the next, frankly, more addictive, more harmful version of social media for our children,” Bauer-Kahan told KQED. “I do not think that the addictive features in these chatbots that result in our children having relationships with a chatbot instead of their fellow humans is a positive thing, and the experts \u003ca href=\"https://cdt.org/insights/hand-in-hand-schools-embrace-of-ai-connected-to-increased-risks-to-students/\">confirm that\u003c/a>.”\u003c/p>\n\u003cp>OpenAI did not comment for this story, but the company has written that it’s \u003ca href=\"https://openai.com/index/teen-safety-freedom-and-privacy/\">working\u003c/a> on an under-18 version of ChatGPT, which will redirect minors to age-appropriate content. 
A couple of weeks ago, OpenAI announced it’s rolling out safety features for minors, including an age prediction system and a way for \u003ca href=\"https://openai.com/index/introducing-parental-controls/\">parents\u003c/a> to control their teens’ ChatGPT accounts. This week, OpenAI announced the formation of \u003ca href=\"https://openai.com/index/expert-council-on-well-being-and-ai/\">an expert council \u003c/a>of mental health professionals to advise the company on well-being and AI.\u003c/p>\n\u003cp>In mid-September, the Federal Trade Commission launched an \u003ca href=\"https://www.ftc.gov/news-events/news/press-releases/2025/09/ftc-launches-inquiry-ai-chatbots-acting-companions\">inquiry\u003c/a> into seven AI chatbot developers, including xAI, Meta and OpenAI, “seeking information on how these firms measure, test, and monitor potentially negative impacts of this technology on children and teens.”\u003c/p>\n\u003cp>For the most part, a couple of dozen \u003ca href=\"https://techcrunch.com/2025/09/06/the-growing-debate-over-expanding-age-verification-laws/\">states\u003c/a> and their \u003ca href=\"https://oag.ca.gov/system/files/attachments/press-docs/AI%20Chatbot_FINAL%20%2844%29.pdf\">attorneys general\u003c/a> have taken the lead on regulation, enacting measures like age verification and requiring many online platforms to verify users’ identities before granting access. East Bay Assemblymember Buffy Wicks won the \u003ca href=\"https://a14.asmdc.org/press-releases/20250909-google-meta-among-tech-leaders-and-child-advocates-voicing-support-wicks\">support of major tech\u003c/a> companies for her measure, \u003ca href=\"https://a14.asmdc.org/press-releases/20250602-asm-wicks-bill-protect-kids-online-passes-assembly-bipartisan-support\">AB 1043\u003c/a>, which was just signed into law by Gov. Gavin Newsom.\u003c/p>\n\u003cp>But any parent knows it’s easy for children to sidestep those controls, or reach out to older siblings or friends who can help them, Bauer-Kahan said. She said she sees a coincidence in the fact that the veto of her toughest bill was announced on Monday, and Altman’s announcement was posted on Tuesday.\u003c/p>\n\u003cp>“Here was a bill that was really requiring very clear, safe-by-design AI for children with real liability. And I think that was further than the industry wanted California to go. I just found the timing of the veto and then this announcement about access to erotica too coincidental not to call out,” she said.\u003c/p>\n\u003cp>[ad floatright]\u003c/p>\n",
"blocks": [],
"excerpt": "OpenAI’s announcement this week that erotic content will soon be available to adults reflects a growing trend. Some researchers and Bay Area politicians are worried about the effects. ",
"status": "publish",
"parent": 0,
"modified": 1760988336,
"stats": {
"hasAudio": false,
"hasVideo": false,
"hasChartOrMap": false,
"iframeSrcs": [],
"hasGoogleForm": false,
"hasGallery": false,
"hasHearkenModule": false,
"hasPolis": false,
"paragraphCount": 20,
"wordCount": 1189
},
"headData": {
"title": "ChatGPT Will Soon Allow Adults to Generate Erotica. Is This the Future We Want? | KQED",
"description": "OpenAI’s announcement this week that erotic content will soon be available to adults reflects a growing trend. Some researchers and Bay Area politicians are worried about the effects. ",
"ogTitle": "",
"ogDescription": "",
"ogImgId": "",
"twTitle": "",
"twDescription": "",
"twImgId": "",
"schema": {
"@context": "https://schema.org",
"@type": "NewsArticle",
"headline": "ChatGPT Will Soon Allow Adults to Generate Erotica. Is This the Future We Want?",
"datePublished": "2025-10-17T13:54:05-07:00",
"dateModified": "2025-10-20T12:25:36-07:00",
"image": "https://cdn.kqed.org/wp-content/uploads/2020/02/KQED-OG-Image@1x.png",
"isAccessibleForFree": "True",
"publisher": {
"@type": "NewsMediaOrganization",
"@id": "https://www.kqed.org/#organization",
"name": "KQED",
"logo": "https://cdn.kqed.org/wp-content/uploads/2020/02/KQED-OG-Image@1x.png",
"url": "https://www.kqed.org",
"sameAs": [
"https://www.facebook.com/KQED",
"https://twitter.com/KQED",
"https://www.instagram.com/kqed/",
"https://www.tiktok.com/@kqedofficial",
"https://www.linkedin.com/company/kqed",
"https://www.youtube.com/channel/UCeC0IOo7i1P_61zVUWbJ4nw"
]
}
}
},
"primaryCategory": {
"termId": 248,
"slug": "technology",
"name": "Technology"
},
"sticky": false,
"nprStoryId": "kqed-12060365",
"templateType": "standard",
"featuredImageType": "standard",
"excludeFromSiteSearch": "Include",
"articleAge": "0",
"path": "/news/12060365/chatgpt-will-soon-allow-adults-to-generate-erotica-is-this-the-future-we-want",
"audioTrackLength": null,
"parsedContent": [
{
"type": "contentString",
"content": "\u003cdiv class=\"post-body\">\u003cp>\u003cp>OpenAI isn’t the first developer to announce plans to \u003ca href=\"https://www.kqed.org/news/12038154/kids-talking-ai-companion-chatbots-stanford-researchers-say-thats-bad-idea\">offer erotic content on its chatbot\u003c/a>. But the blowback against the tech company’s decision to loosen restrictions this week has been bigger, given the San Francisco-based company’s promise to ensure its AI\u003ca href=\"https://openai.com/our-structure/\"> benefits all of humanity\u003c/a>.\u003c/p>\n\u003cp>The most significant change will roll out in December, when OpenAI will allow more comprehensive age-gating, allowing verified adults to generate erotic content using the tool — “as part of our ‘treat adult users like adults’ principle,” OpenAI CEO Sam \u003ca href=\"https://x.com/sama/status/1978129344598827128\">Altman posted Tuesday\u003c/a> on the social media platform X.\u003c/p>\n\u003cp>Consumer advocates say OpenAI is following the lead of xAI’s Grok, which offers loosely moderated “adult” modes with minimal age verification, raising concerns that teenage users may have access to explicit content. Meta AI is believed to be following xAI’s lead as well, and its back and forth over whether it is intentionally pushing mature content to minors has \u003ca href=\"https://www.reuters.com/world/us/us-senator-hawley-launches-probe-into-meta-ai-policies-2025-08-15/\">prompted\u003c/a> U.S. Sen. Josh Hawley, R-Missouri, to investigate.\u003c/p>\n\u003cp>\u003c/p>\u003c/div>",
"attributes": {
"named": {},
"numeric": []
}
},
{
"type": "component",
"content": "",
"name": "ad",
"attributes": {
"named": {
"label": "fullwidth"
},
"numeric": [
"fullwidth"
]
}
},
{
"type": "contentString",
"content": "\u003cdiv class=\"post-body\">\u003cp>\u003c/p>\n\u003cp>“We made ChatGPT pretty restrictive to make sure we were being careful with mental health issues. We realize this made it less useful/enjoyable to many users who had no mental health problems, but given the seriousness of the issue, we wanted to get this right,” Altman wrote.\u003c/p>\n\u003cp>The announcement came less than two months after the company was sued by the parents of Adam Raine, a teenager who \u003ca href=\"https://www.kqed.org/news/12054490/child-safety-groups-demand-mental-health-guardrails-after-california-teens-suicide-using-chatgpt\">died by suicide\u003c/a> earlier this year, for ChatGPT allegedly providing him with specific advice on how to kill himself — setting off a firestorm of news coverage and comment.\u003c/p>\n\u003cfigure id=\"attachment_11989313\" class=\"wp-caption aligncenter\" style=\"max-width: 2000px\">\u003cimg loading=\"lazy\" decoding=\"async\" class=\"size-full wp-image-11989313\" src=\"https://cdn.kqed.org/wp-content/uploads/sites/10/2024/06/GettyImages-2155035557-scaled-e1760733694503.jpg\" alt=\"\" width=\"2000\" height=\"1334\">\u003cfigcaption class=\"wp-caption-text\">The OpenAI ChatGPT logo. \u003ccite>(Jaap Arriens/NurPhoto via Getty Images)\u003c/cite>\u003c/figcaption>\u003c/figure>\n\u003cp>Altman delivered \u003ca href=\"https://x.com/sama/status/1978539332215681076\">a follow-up\u003c/a> on Wednesday. “We will still not allow things that cause harm to others, and we will treat users who are having mental health crises very different from users who are not … But we are not the elected moral police of the world. In the same way that society differentiates other appropriate boundaries (R-rated movies, for example), we want to do a similar thing here,” Altman wrote, although it remains unclear whether OpenAI will extend erotica to its AI voice, image and video generation tools.\u003c/p>\n\u003cp>“Comparing content moderation of chatbot interactions with movie ratings is not really useful,” wrote Irina Raicu, director of the Internet Ethics program at the Markkula Center for Applied Ethics at Santa Clara University. “It downplays both the nature and the extent of the problems that we’re seeing when people get more and more dependent on and influenced by chatbot ‘relationships.’”\u003c/p>\n\u003cp>Mark Cuban, the entrepreneur, investor and media personality, argued much the same in a string of \u003ca href=\"https://x.com/mcuban/status/1978317936336028016\">posts on X\u003c/a>.\u003c/p>\n\u003cp>“I don’t see how OpenAI can age-gate successfully enough. I’m also not sure that it can’t psychologically damage young adults. We just don’t know yet how addictive LLMs can be. Which, in my OPINION, means that parents and schools, that would otherwise want to use ChatGPT because of its current ubiquity, will decide not to use it,” Cuban wrote.\u003c/p>\u003c/div>",
"attributes": {
"named": {},
"numeric": []
}
},
{
"type": "component",
"content": "",
"name": "aside",
"attributes": {
"named": {
"postid": "news_12059714",
"hero": "https://cdn.kqed.org/wp-content/uploads/sites/10/2025/08/GavinNewsomAISF1.jpg",
"label": ""
},
"numeric": []
}
},
{
"type": "contentString",
"content": "\u003cdiv class=\"post-body\">\u003cp>Others see the drive for paying subscribers and increased profit behind the move. As a private company, OpenAI does not release its shareholder reports publicly. However, \u003ca href=\"https://www.bloomberg.com/news/articles/2025-10-02/openai-completes-share-sale-at-record-500-billion-valuation?accessToken=eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJzb3VyY2UiOiJTdWJzY3JpYmVyR2lmdGVkQXJ0aWNsZSIsImlhdCI6MTc2MDcxODQwMSwiZXhwIjoxNzYxMzIzMjAxLCJhcnRpY2xlSWQiOiJUM0hLMkNHUFdDSEIwMCIsImJjb25uZWN0SWQiOiJBM0VCRjM5ODM4RDc0RDI4QUJDREM4MDZDMDA5RTVBMiJ9.ADGZysjoeNVhUDWXwiuAxieyKueee-676dgJIAM9BvQ\">Bloomberg\u003c/a> recently reported that OpenAI has completed a deal to help employees sell shares in the company at a $500 billion valuation. According to Altman, ChatGPT is already used by \u003ca href=\"https://techcrunch.com/2025/10/06/sam-altman-says-chatgpt-has-hit-800m-weekly-active-users/\">800 million weekly active users\u003c/a>. With so much investment at stake, OpenAI is under pressure to grow its subscriber base. The company has also raised billions of dollars for a historic infrastructure buildout, an investment OpenAI eventually needs to pay back.\u003c/p>\n\u003cp>“It is no secret that sexual content is one of the most popular and lucrative aspects of the internet,” wrote Jennifer King, a privacy and data policy fellow at the Stanford University Institute for Human-Centered Artificial Intelligence. She noted that nearly 20 U.S. states have passed laws \u003ca href=\"https://www.axios.com/2025/01/16/adult-website-age-verification-states\">requiring age verification for online adult content\u003c/a> sites.\u003c/p>\n\u003cp>“By openly embracing business models that allow access to adult content, mainstream providers like OpenAI will face the burden of demonstrating that they have robust methods for excluding children under 18 and potentially adults under the age of 21,” King said.\u003c/p>\n\u003cp>AI chatbots appear to be going the way of social media, said California Assemblymember Rebecca Bauer-Kahan, D-San Ramon, whose bill that would have required child safety guardrails for companion chatbots was \u003ca href=\"https://www.kqed.org/news/12059714/newsom-vetoes-most-watched-childrens-ai-bill-signs-16-others-targeting-tech\">vetoed earlier this week\u003c/a>.\u003c/p>\n\u003cfigure id=\"attachment_11802216\" class=\"wp-caption aligncenter\" style=\"max-width: 1920px\">\u003cimg loading=\"lazy\" decoding=\"async\" class=\"size-full wp-image-11802216\" src=\"https://cdn.kqed.org/wp-content/uploads/sites/10/2020/02/RS41373_IMG_0396-qut.jpg\" alt=\"Assemblymember Rebecca Bauer-Kahan says local jurisdictions need the power to stop a wildfire disaster before it starts. 
The assemblymember and other state lawmakers announced a bill to expand enforcement actions against PG&E and other utilities on February, 18, 2020.\" width=\"1920\" height=\"1440\" srcset=\"https://cdn.kqed.org/wp-content/uploads/sites/10/2020/02/RS41373_IMG_0396-qut.jpg 1920w, https://cdn.kqed.org/wp-content/uploads/sites/10/2020/02/RS41373_IMG_0396-qut-160x120.jpg 160w, https://cdn.kqed.org/wp-content/uploads/sites/10/2020/02/RS41373_IMG_0396-qut-800x600.jpg 800w, https://cdn.kqed.org/wp-content/uploads/sites/10/2020/02/RS41373_IMG_0396-qut-1020x765.jpg 1020w, https://cdn.kqed.org/wp-content/uploads/sites/10/2020/02/RS41373_IMG_0396-qut-1832x1374.jpg 1832w, https://cdn.kqed.org/wp-content/uploads/sites/10/2020/02/RS41373_IMG_0396-qut-1376x1032.jpg 1376w, https://cdn.kqed.org/wp-content/uploads/sites/10/2020/02/RS41373_IMG_0396-qut-1044x783.jpg 1044w, https://cdn.kqed.org/wp-content/uploads/sites/10/2020/02/RS41373_IMG_0396-qut-632x474.jpg 632w, https://cdn.kqed.org/wp-content/uploads/sites/10/2020/02/RS41373_IMG_0396-qut-536x402.jpg 536w\" sizes=\"auto, (max-width: 1920px) 100vw, 1920px\">\u003cfigcaption class=\"wp-caption-text\">Assemblymember Rebecca Bauer-Kahan on Feb. 18, 2020. \u003ccite>(Eli Walsh/Bay City News)\u003c/cite>\u003c/figcaption>\u003c/figure>\n\u003cp>“My fear is that we are on a path to creating the next, frankly, more addictive, more harmful version of social media for our children,” Bauer-Kahan told KQED. “I do not think that the addictive features in these chatbots that result in our children having relationships with a chatbot instead of their fellow humans is a positive thing, and the experts \u003ca href=\"https://cdt.org/insights/hand-in-hand-schools-embrace-of-ai-connected-to-increased-risks-to-students/\">confirm that\u003c/a>.”\u003c/p>\n\u003cp>OpenAI did not comment for this story, but the company has written that it’s \u003ca href=\"https://openai.com/index/teen-safety-freedom-and-privacy/\">working\u003c/a> on an under-18 version of ChatGPT, which will redirect minors to age-appropriate content. A couple of weeks ago, OpenAI announced it’s rolling out safety features for minors, including an age prediction system and a way for \u003ca href=\"https://openai.com/index/introducing-parental-controls/\">parents\u003c/a> to control their teens’ ChatGPT accounts. This week, OpenAI announced the formation of \u003ca href=\"https://openai.com/index/expert-council-on-well-being-and-ai/\">an expert council \u003c/a>of mental health professionals to advise the company on well-being and AI.\u003c/p>\n\u003cp>In mid-September, the Federal Trade Commission launched an \u003ca href=\"https://www.ftc.gov/news-events/news/press-releases/2025/09/ftc-launches-inquiry-ai-chatbots-acting-companions\">inquiry\u003c/a> into seven AI chatbot developers, including xAI, Meta and OpenAI, “seeking information on how these firms measure, test, and monitor potentially negative impacts of this technology on children and teens.”\u003c/p>\n\u003cp>For the most part, a couple of dozen \u003ca href=\"https://techcrunch.com/2025/09/06/the-growing-debate-over-expanding-age-verification-laws/\">states\u003c/a> and their \u003ca href=\"https://oag.ca.gov/system/files/attachments/press-docs/AI%20Chatbot_FINAL%20%2844%29.pdf\">attorneys general\u003c/a> have taken the lead on regulation, enacting measures like age verification and requiring many online platforms to verify users’ identities before granting access. 
East Bay Assemblymember Buffy Wicks won the \u003ca href=\"https://a14.asmdc.org/press-releases/20250909-google-meta-among-tech-leaders-and-child-advocates-voicing-support-wicks\">support of major tech\u003c/a> companies for her measure, \u003ca href=\"https://a14.asmdc.org/press-releases/20250602-asm-wicks-bill-protect-kids-online-passes-assembly-bipartisan-support\">AB 1043\u003c/a>, which was just signed into law by Gov. Gavin Newsom.\u003c/p>\n\u003cp>But any parent knows it’s easy for children to sidestep those controls, or reach out to older siblings or friends who can help them, Bauer-Kahan said. She also pointed to the timing: the veto of her toughest bill was announced on Monday, and Altman’s announcement was posted on Tuesday.\u003c/p>\n\u003cp>“Here was a bill that was really requiring very clear, safe-by-design AI for children with real liability. And I think that was further than the industry wanted California to go. I just found the timing of the veto and then this announcement about access to erotica too coincidental not to call out,” she said.\u003c/p>\n\u003cp>\u003c/p>\u003c/div>",
"attributes": {
"named": {},
"numeric": []
}
},
{
"type": "component",
"content": "",
"name": "ad",
"attributes": {
"named": {
"label": "floatright"
},
"numeric": [
"floatright"
]
}
},
{
"type": "contentString",
"content": "\u003cdiv class=\"post-body\">\u003cp>\u003c/p>\n\u003c/div>\u003c/p>",
"attributes": {
"named": {},
"numeric": []
}
}
],
"link": "/news/12060365/chatgpt-will-soon-allow-adults-to-generate-erotica-is-this-the-future-we-want",
"authors": [
"251"
],
"categories": [
"news_8",
"news_13",
"news_248"
],
"tags": [
"news_25184",
"news_32664",
"news_34755",
"news_32668",
"news_29886",
"news_2109",
"news_33542",
"news_22456",
"news_33543",
"news_38",
"news_34586",
"news_1631",
"news_21121",
"news_20385"
],
"featImg": "news_12060375",
"label": "news"
},
"news_12059714": {
"type": "posts",
"id": "news_12059714",
"meta": {
"index": "posts_1716263798",
"site": "news",
"id": "12059714",
"score": null,
"sort": [
1760460055000
]
},
"guestAuthors": [],
"slug": "newsom-vetoes-most-watched-childrens-ai-bill-signs-16-others-targeting-tech",
"title": "Newsom Vetoes Most-Watched Children's AI Bill, Signs 16 Others Targeting Tech",
"publishDate": 1760460055,
"format": "audio",
"headTitle": "Newsom Vetoes Most-Watched Children’s AI Bill, Signs 16 Others Targeting Tech | KQED",
"labelTerm": {
"site": "news"
},
"content": "\u003cp>California Gov. \u003ca href=\"https://www.kqed.org/news/tag/gavin-newsom\">Gavin Newsom\u003c/a> vetoed legislation that would have prohibited developers from offering companion AI chatbots for children unless the companies can promise the software won’t encourage harmful behavior.\u003c/p>\n\u003cp>Sacramento players across the political spectrum watched the legislation closely, with advocates issuing press releases, open letters and \u003ca href=\"https://www.kqed.org/news/12038154/kids-talking-ai-companion-chatbots-stanford-researchers-say-thats-bad-idea\">research reports\u003c/a> in hopes of swaying California’s tech-friendly governor.\u003c/p>\n\u003cp>In his \u003ca href=\"https://www.gov.ca.gov/wp-content/uploads/2025/10/AB-1064-Veto.pdf\">veto message\u003c/a> published Monday, Newsom wrote that AB-1064 could lead to a total ban on minors using conversational AI tools. “AI is already shaping the world, and it is imperative that adolescents learn how to safely interact with AI systems,” Newsom wrote.\u003c/p>\n\u003cp>[ad fullwidth]\u003c/p>\n\u003cp>“We’re sorely disappointed to see Governor Newsom side with Big Tech over the more than 150 families who have suffered the most unimaginable loss: the passing of their child, encouraged by companion AI,” Assemblymember Rebecca Bauer-Kahan of Orinda, the bill’s author, wrote in a statement, noting the bill was sponsored by Common Sense Media, California Attorney General Rob Bonta, and more than 20 organizations.\u003c/p>\n\u003cp>“These AI companies know the risks their products pose. They’ve made purposeful design decisions that put kids in harm’s way, creating chatbots that form dangerous emotional bonds with vulnerable young people,” she added.\u003c/p>\n\u003cfigure id=\"attachment_11917730\" class=\"wp-caption aligncenter\" style=\"max-width: 1920px\">\u003cimg loading=\"lazy\" decoding=\"async\" class=\"size-full wp-image-11917730\" src=\"https://cdn.kqed.org/wp-content/uploads/sites/10/2022/06/RS56739_IMG_4228-qut.jpg\" alt=\"\" width=\"1920\" height=\"1440\" srcset=\"https://cdn.kqed.org/wp-content/uploads/sites/10/2022/06/RS56739_IMG_4228-qut.jpg 1920w, https://cdn.kqed.org/wp-content/uploads/sites/10/2022/06/RS56739_IMG_4228-qut-800x600.jpg 800w, https://cdn.kqed.org/wp-content/uploads/sites/10/2022/06/RS56739_IMG_4228-qut-1020x765.jpg 1020w, https://cdn.kqed.org/wp-content/uploads/sites/10/2022/06/RS56739_IMG_4228-qut-160x120.jpg 160w, https://cdn.kqed.org/wp-content/uploads/sites/10/2022/06/RS56739_IMG_4228-qut-1536x1152.jpg 1536w\" sizes=\"auto, (max-width: 1920px) 100vw, 1920px\">\u003cfigcaption class=\"wp-caption-text\">Assemblywoman Rebecca Bauer-Kahan, D-Orinda, on Political Breakdown. \u003ccite>(Guy Marzorati/KQED)\u003c/cite>\u003c/figcaption>\u003c/figure>\n\u003cp>“We’ve seen suicides lately. We’ve seen all sorts of mental health disruptions caused by AI companions. That said, the fight is just beginning,” Common Sense Media CEO Jim Steyer told KQED. 
“California is clearly leading the way in the United States and globally on these issues, and the next year or two are going to be absolutely critical in defining regulations, guardrails and a common sense future for the big tech industry.”\u003c/p>\n\u003cp>The trade group TechNet lobbied heavily against the bill, \u003ca href=\"https://www.technet.org/the-impact-of-ca-ab-1064/\">running ads\u003c/a> that warned that Bauer-Kahan’s bill could deny children access to critical tools they need to succeed.\u003c/p>\n\u003cp>“We appreciate Governor Newsom’s thoughtful consideration and ultimate veto of this proposed legislation,” wrote Robert Boykin, TechNet’s Executive Director for California and the Southwest. “While TechNet shares the goal of AB 1064, the bill fails to meet its stated objectives while threatening students’ access to valuable AI-driven learning tools, potentially life-saving medical treatments, crisis response interventions, safety mechanisms, and other valuable AI technologies.”[aside postID=news_12059209 hero='https://cdn.kqed.org/wp-content/uploads/sites/10/2025/10/251008_ENDOF10_-9-KQED.jpg']The host of AI-related bills that made it to Newsom’s desk this legislative session presented him with a political \u003ca href=\"https://www.kqed.org/news/12052617/newsoms-tightrope-walk-between-ai-regulation-and-silicon-valley-cash\">balancing act\u003c/a>, as he eyes a \u003ca href=\"https://www.kqed.org/news/12043766/newsom-tries-to-find-political-footing-in-clash-with-trump\">run for the White House\u003c/a>. Many of the bills were opposed by trade associations heavily bankrolled by Silicon Valley, and California is home to \u003ca href=\"https://www.forbes.com/lists/ai50/\">32 of the 50 top AI companies\u003c/a> worldwide.\u003c/p>\n\u003cp>As if anticipating the blowback from child safety advocates, Newsom’s office released \u003ca href=\"https://www.gov.ca.gov/2025/10/13/governor-newsom-signs-bills-to-further-strengthen-californias-leadership-in-protecting-children-online/\">a list of 16 AI bills\u003c/a> he approved this session, some focused on children. “California has long stood as a bold leader in protecting children from the danger of emerging technology,” the statement read.\u003c/p>\n\u003cp>On the list: SB 243 by Sen. Steve Padilla, D-San Diego, which placed softer limits on AI chatbots for kids. Advocacy groups, including Common Sense Media and \u003ca href=\"https://techoversight.org/wp-content/uploads/2025/09/SB-243-Remove-Support.pdf\">Tech Oversight California\u003c/a>, pulled their support from the bill in mid-September, arguing industry-friendly amendments weakened it and could establish a “dangerous” precedent for other states and countries taking California’s lead on AI regulation.\u003c/p>\n\u003cp>In a similar vein, Newsom signed an \u003ca href=\"https://www.kqed.org/news/12058013/newsom-signs-california-ai-transparency-bill-tailored-to-meet-tech-industry-tastes\">industry-friendly version\u003c/a> of SB-53 by Sen. Scott Wiener (D-San Francisco), after his original effort became target No. 
1 for Silicon Valley lobbyists\u003ca href=\"https://www.kqed.org/news/12007323/can-california-still-lead-on-ai-regulation-following-newsoms-veto-of-ai-safety-bill\"> last legislative session\u003c/a> and died on Newsom’s desk.\u003c/p>\n\u003cp>But not all the bills Newsom signed this legislative session lack teeth.\u003c/p>\n\u003cp>AB 621, for instance, expands the ability of deepfake pornography victims to sue anyone who creates, digitally alters, or distributes a sexually explicit image or video in which they appear to engage in sexual conduct without their consent. The expanded private right of action is considered a notable strength when most other AI bills rely on regulatory enforcement, penalties, or agency reporting to sway business practices.\u003c/p>\n\u003cp>Steyer said he was happy to see Newsom’s signature on AB 56, which supporters say will require first-in-the-nation warning labels on social media, similar to what California has mandated on packaging for alcohol and cigarettes.\u003c/p>\n\u003cp>“It’s clear that Gov. Newsom, and also the first partner, Jennifer Siebel Newsom, who’s heavily involved in all this legislation, have listened to parents, and advocacy groups around the state,” Steyer said.\u003c/p>\n\u003cp>\u003c/p>\n",
"blocks": [],
"excerpt": "The legislation would have restricted children’s access to AI chatbots, but Newsom sided with the tech industry, saying it would have led to a total ban on some AI tools for adolescents.",
"status": "publish",
"parent": 0,
"modified": 1760654510,
"stats": {
"hasAudio": false,
"hasVideo": false,
"hasChartOrMap": false,
"iframeSrcs": [],
"hasGoogleForm": false,
"hasGallery": false,
"hasHearkenModule": false,
"hasPolis": false,
"paragraphCount": 17,
"wordCount": 810
},
"headData": {
"title": "Newsom Vetoes Most-Watched Children's AI Bill, Signs 16 Others Targeting Tech | KQED",
"description": "The legislation would have restricted children’s access to AI chatbots, but Newsom sided with the tech industry, saying it would have led to a total ban on some AI tools for adolescents.",
"ogTitle": "",
"ogDescription": "",
"ogImgId": "",
"twTitle": "",
"twDescription": "",
"twImgId": "",
"schema": {
"@context": "https://schema.org",
"@type": "NewsArticle",
"headline": "Newsom Vetoes Most-Watched Children's AI Bill, Signs 16 Others Targeting Tech",
"datePublished": "2025-10-14T09:40:55-07:00",
"dateModified": "2025-10-16T15:41:50-07:00",
"image": "https://cdn.kqed.org/wp-content/uploads/2020/02/KQED-OG-Image@1x.png",
"isAccessibleForFree": "True",
"publisher": {
"@type": "NewsMediaOrganization",
"@id": "https://www.kqed.org/#organization",
"name": "KQED",
"logo": "https://cdn.kqed.org/wp-content/uploads/2020/02/KQED-OG-Image@1x.png",
"url": "https://www.kqed.org",
"sameAs": [
"https://www.facebook.com/KQED",
"https://twitter.com/KQED",
"https://www.instagram.com/kqed/",
"https://www.tiktok.com/@kqedofficial",
"https://www.linkedin.com/company/kqed",
"https://www.youtube.com/channel/UCeC0IOo7i1P_61zVUWbJ4nw"
]
}
}
},
"primaryCategory": {
"termId": 13,
"slug": "politics",
"name": "Politics"
},
"audioUrl": "https://traffic.omny.fm/d/clips/0af137ef-751e-4b19-a055-aaef00d2d578/ffca7e9f-6831-4[…]f-aaef00f5a073/0c260676-5389-476a-a2ae-b377000ac4db/audio.mp3",
"sticky": false,
"nprStoryId": "kqed-12059714",
"templateType": "standard",
"featuredImageType": "standard",
"excludeFromSiteSearch": "Include",
"articleAge": "0",
"path": "/news/12059714/newsom-vetoes-most-watched-childrens-ai-bill-signs-16-others-targeting-tech",
"audioTrackLength": null,
"parsedContent": [
{
"type": "contentString",
"content": "\u003cdiv class=\"post-body\">\u003cp>\u003cp>California Gov. \u003ca href=\"https://www.kqed.org/news/tag/gavin-newsom\">Gavin Newsom\u003c/a> vetoed legislation that would have prohibited developers from offering companion AI chatbots for children unless the companies can promise the software won’t encourage harmful behavior.\u003c/p>\n\u003cp>Sacramento players across the political spectrum watched the legislation closely, with advocates issuing press releases, open letters and \u003ca href=\"https://www.kqed.org/news/12038154/kids-talking-ai-companion-chatbots-stanford-researchers-say-thats-bad-idea\">research reports\u003c/a> in hopes of swaying California’s tech-friendly governor.\u003c/p>\n\u003cp>In his \u003ca href=\"https://www.gov.ca.gov/wp-content/uploads/2025/10/AB-1064-Veto.pdf\">veto message\u003c/a> published Monday, Newsom wrote that AB-1064 could lead to a total ban on minors using conversational AI tools. “AI is already shaping the world, and it is imperative that adolescents learn how to safely interact with AI systems,” Newsom wrote.\u003c/p>\n\u003cp>\u003c/p>\u003c/div>",
"attributes": {
"named": {},
"numeric": []
}
},
{
"type": "component",
"content": "",
"name": "ad",
"attributes": {
"named": {
"label": "fullwidth"
},
"numeric": [
"fullwidth"
]
}
},
{
"type": "contentString",
"content": "\u003cdiv class=\"post-body\">\u003cp>\u003c/p>\n\u003cp>“We’re sorely disappointed to see Governor Newsom side with Big Tech over the more than 150 families who have suffered the most unimaginable loss: the passing of their child, encouraged by companion AI,” Assemblymember Rebecca Bauer-Kahan of Orinda, the bill’s author, wrote in a statement, noting the bill was sponsored by Common Sense Media, California Attorney General Rob Bonta, and more than 20 organizations.\u003c/p>\n\u003cp>“These AI companies know the risks their products pose. They’ve made purposeful design decisions that put kids in harm’s way, creating chatbots that form dangerous emotional bonds with vulnerable young people,” she added.\u003c/p>\n\u003cfigure id=\"attachment_11917730\" class=\"wp-caption aligncenter\" style=\"max-width: 1920px\">\u003cimg loading=\"lazy\" decoding=\"async\" class=\"size-full wp-image-11917730\" src=\"https://cdn.kqed.org/wp-content/uploads/sites/10/2022/06/RS56739_IMG_4228-qut.jpg\" alt=\"\" width=\"1920\" height=\"1440\" srcset=\"https://cdn.kqed.org/wp-content/uploads/sites/10/2022/06/RS56739_IMG_4228-qut.jpg 1920w, https://cdn.kqed.org/wp-content/uploads/sites/10/2022/06/RS56739_IMG_4228-qut-800x600.jpg 800w, https://cdn.kqed.org/wp-content/uploads/sites/10/2022/06/RS56739_IMG_4228-qut-1020x765.jpg 1020w, https://cdn.kqed.org/wp-content/uploads/sites/10/2022/06/RS56739_IMG_4228-qut-160x120.jpg 160w, https://cdn.kqed.org/wp-content/uploads/sites/10/2022/06/RS56739_IMG_4228-qut-1536x1152.jpg 1536w\" sizes=\"auto, (max-width: 1920px) 100vw, 1920px\">\u003cfigcaption class=\"wp-caption-text\">Assemblywoman Rebecca Bauer-Kahan, D-Orinda, on Political Breakdown. \u003ccite>(Guy Marzorati/KQED)\u003c/cite>\u003c/figcaption>\u003c/figure>\n\u003cp>“We’ve seen suicides lately. We’ve seen all sorts of mental health disruptions caused by AI companions. That said, the fight is just beginning,” Common Sense Media CEO Jim Steyer told KQED. “California is clearly leading the way in the United States and globally on these issues, and the next year or two are going to be absolutely critical in defining regulations, guardrails and a common sense future for the big tech industry.”\u003c/p>\n\u003cp>The trade group TechNet lobbied heavily against the bill, \u003ca href=\"https://www.technet.org/the-impact-of-ca-ab-1064/\">running ads\u003c/a> that warned that Bauer-Kahan’s bill could deny children access to critical tools they need to succeed.\u003c/p>\n\u003cp>“We appreciate Governor Newsom’s thoughtful consideration and ultimate veto of this proposed legislation,” wrote Robert Boykin, TechNet’s Executive Director for California and the Southwest. “While TechNet shares the goal of AB 1064, the bill fails to meet its stated objectives while threatening students’ access to valuable AI-driven learning tools, potentially life-saving medical treatments, crisis response interventions, safety mechanisms, and other valuable AI technologies.”\u003c/p>\u003c/div>",
"attributes": {
"named": {},
"numeric": []
}
},
{
"type": "component",
"content": "",
"name": "aside",
"attributes": {
"named": {
"postid": "news_12059209",
"hero": "https://cdn.kqed.org/wp-content/uploads/sites/10/2025/10/251008_ENDOF10_-9-KQED.jpg",
"label": ""
},
"numeric": []
}
},
{
"type": "contentString",
"content": "\u003cdiv class=\"post-body\">\u003cp>The host of AI-related bills that made it to Newsom’s desk this legislative session presented him with a political \u003ca href=\"https://www.kqed.org/news/12052617/newsoms-tightrope-walk-between-ai-regulation-and-silicon-valley-cash\">balancing act\u003c/a>, as he eyes a \u003ca href=\"https://www.kqed.org/news/12043766/newsom-tries-to-find-political-footing-in-clash-with-trump\">run for the White House\u003c/a>. Many of the bills were opposed by trade associations heavily bankrolled by Silicon Valley, and California is home to \u003ca href=\"https://www.forbes.com/lists/ai50/\">32 of the 50 top AI companies\u003c/a> worldwide.\u003c/p>\n\u003cp>As if anticipating the blowback from child safety advocates, Newsom’s office released \u003ca href=\"https://www.gov.ca.gov/2025/10/13/governor-newsom-signs-bills-to-further-strengthen-californias-leadership-in-protecting-children-online/\">a list of 16 AI bills\u003c/a> he approved this session, some focused on children. “California has long stood as a bold leader in protecting children from the danger of emerging technology,” the statement read.\u003c/p>\n\u003cp>On the list: SB 243 by Sen. Steve Padilla, D-San Diego, which placed softer limits on AI chatbots for kids. Advocacy groups, including Common Sense Media and \u003ca href=\"https://techoversight.org/wp-content/uploads/2025/09/SB-243-Remove-Support.pdf\">Tech Oversight California\u003c/a>, pulled their support from the bill in mid-September, arguing industry-friendly amendments weakened it and could establish a “dangerous” precedent for other states and countries taking California’s lead on AI regulation.\u003c/p>\n\u003cp>In a similar vein, Newsom signed an \u003ca href=\"https://www.kqed.org/news/12058013/newsom-signs-california-ai-transparency-bill-tailored-to-meet-tech-industry-tastes\">industry-friendly version\u003c/a> of SB-53 by Sen. Scott Wiener (D-San Francisco), after his original effort became target No. 1 for Silicon Valley lobbyists\u003ca href=\"https://www.kqed.org/news/12007323/can-california-still-lead-on-ai-regulation-following-newsoms-veto-of-ai-safety-bill\"> last legislative session\u003c/a> and died on Newsom’s desk.\u003c/p>\n\u003cp>But not all the bills Newsom signed this legislation session lack teeth.\u003c/p>\n\u003cp>AB 621, for instance, expands the ability of deepfake pornography victims to sue anyone who creates, digitally alters, or distributes a sexually explicit image or video in which they appear to engage in sexual conduct without their consent. The expanded private right of action is considered a notable strength when most other AI bills rely on regulatory enforcement, penalties, or agency reporting to sway business practices.\u003c/p>\n\u003cp>Steyer said he was happy to see Newsom’s signature on AB 56, which supporters say will require first-in-the-nation warning labels on social media, similar to what California has mandated on packaging for alcohol and cigarettes.\u003c/p>\n\u003cp>“It’s clear that Gov. Newsom, and also the first partner, Jennifer Siebel Newsom, who’s heavily involved in all this legislation, have listened to parents, and advocacy groups around the state,” Steyer said.\u003c/p>\n\u003cp>\u003c/p>\n\u003c/div>\u003c/p>",
"attributes": {
"named": {},
"numeric": []
}
}
],
"link": "/news/12059714/newsom-vetoes-most-watched-childrens-ai-bill-signs-16-others-targeting-tech",
"authors": [
"251"
],
"categories": [
"news_31795",
"news_8",
"news_13",
"news_248"
],
"tags": [
"news_25184",
"news_32664",
"news_34755",
"news_18538",
"news_22307",
"news_32668",
"news_30826",
"news_16",
"news_34532",
"news_34586",
"news_21285",
"news_1631",
"news_20385"
],
"featImg": "news_12051437",
"label": "news"
},
"news_12058013": {
"type": "posts",
"id": "news_12058013",
"meta": {
"index": "posts_1716263798",
"site": "news",
"id": "12058013",
"score": null,
"sort": [
1759183178000
]
},
"guestAuthors": [],
"slug": "newsom-signs-california-ai-transparency-bill-tailored-to-meet-tech-industry-tastes",
"title": "Newsom Signs California AI Transparency Bill Tailored to Meet Tech Industry Tastes",
"publishDate": 1759183178,
"format": "standard",
"headTitle": "Newsom Signs California AI Transparency Bill Tailored to Meet Tech Industry Tastes | KQED",
"labelTerm": {
"site": "news"
},
"content": "\u003cp>Gov. Gavin Newsom today \u003ca href=\"https://www.gov.ca.gov/2025/09/29/governor-newsom-signs-sb-53-advancing-californias-world-leading-artificial-intelligence-industry/\">signed\u003c/a> into law\u003ca href=\"https://leginfo.legislature.ca.gov/faces/billNavClient.xhtml?bill_id=202520260SB53\"> Senate Bill 53\u003c/a>, which would require large model developers like Anthropic and Open AI to be transparent about safety measures they put in place to prevent catastrophic events. The legislation would also create CalCompute, a public cloud infrastructure that expands access to AI resources for researchers, startups and public institutions.\u003c/p>\n\u003cp>In announcing his decision, Newsom wrote, “California has proven that we can establish regulations to protect our communities while also ensuring that the growing AI industry continues to thrive. This legislation strikes that balance.”\u003c/p>\n\u003cp>Senator Scott Wiener (D-San Francisco) authored the bill, after his original effort became target No. 1 for Silicon Valley lobbyists\u003ca href=\"https://www.kqed.org/news/12007323/can-california-still-lead-on-ai-regulation-following-newsoms-veto-of-ai-safety-bill\"> last legislative session\u003c/a> and died on Newsom’s desk. That bill spooked high-profile California politicians, including\u003ca href=\"https://www.kqed.org/news/12002254/california-bill-to-regulate-catastrophic-effects-of-ai-heads-to-newsoms-desk\"> Nancy Pelosi,\u003c/a> nervous about getting on the wrong side of Big Tech. In last year’s veto message for SB 1047, Newsom announced a working group on AI, which helped lay the groundwork for \u003ca href=\"https://www.kqed.org/news/12020857/california-lawmaker-ready-revive-fight-regulating-ai\">SB 53\u003c/a>.\u003c/p>\n\u003cp>[ad fullwidth]\u003c/p>\n\u003cp>“With a technology as transformative as AI, we have a responsibility to support that innovation while putting in place commonsense guardrails to understand and reduce risk,” \u003ca href=\"https://sd11.senate.ca.gov/news/governor-newsom-signs-senator-wieners-landmark-ai-law-set-commonsense-guardrails-boost\">wrote\u003c/a> Wiener. 
“I’m grateful to the Governor for his leadership in convening the Joint California AI Policy Working Group, working with us to refine the legislation, and now signing it into law.”\u003c/p>\n\u003cp>The working group issued its\u003ca href=\"https://www.gov.ca.gov/wp-content/uploads/2025/06/June-17-2025-%E2%80%93-The-California-Report-on-Frontier-AI-Policy.pdf\"> report\u003c/a> in June, calling on lawmakers to pass transparency requirements and whistleblower protections, declaring that California has the “responsibility” to ensure the safety of generative artificial intelligence software, “so that their benefit to society can be realized.”\u003c/p>\n\u003cfigure id=\"attachment_12058035\" class=\"wp-caption aligncenter\" style=\"max-width: 2560px\">\u003ca href=\"https://cdn.kqed.org/wp-content/uploads/sites/10/2025/09/GettyImages-2159671948-scaled.jpg\">\u003cimg loading=\"lazy\" decoding=\"async\" class=\"size-full wp-image-12058035\" src=\"https://cdn.kqed.org/wp-content/uploads/sites/10/2025/09/GettyImages-2159671948-scaled.jpg\" alt=\"\" width=\"2560\" height=\"1707\" srcset=\"https://cdn.kqed.org/wp-content/uploads/sites/10/2025/09/GettyImages-2159671948-scaled.jpg 2560w, https://cdn.kqed.org/wp-content/uploads/sites/10/2025/09/GettyImages-2159671948-2000x1333.jpg 2000w, https://cdn.kqed.org/wp-content/uploads/sites/10/2025/09/GettyImages-2159671948-160x107.jpg 160w, https://cdn.kqed.org/wp-content/uploads/sites/10/2025/09/GettyImages-2159671948-1536x1024.jpg 1536w, https://cdn.kqed.org/wp-content/uploads/sites/10/2025/09/GettyImages-2159671948-2048x1365.jpg 2048w\" sizes=\"auto, (max-width: 2560px) 100vw, 2560px\">\u003c/a>\u003cfigcaption class=\"wp-caption-text\">Close-up of phone screen displaying Anthropic Claude, a Large Language Model (LLM) powered generative artificial intelligence chatbot, in Lafayette, California, June 27, 2024. \u003ccite>(Photo by Smith Collection/Gado/Getty Images)\u003c/cite>\u003c/figcaption>\u003c/figure>\n\u003cp>The report noted that AI systems have been observed finding loopholes that allow them to behave in ways their programmers did not intend. Also, that competitive pressures are undermining safety, and policy intervention is needed to prevent a race to the bottom.\u003c/p>\n\u003cp>Anthropic, which makes the chatbot Claude, was the first major AI developer to endorse SB 53, having offered more cautious support for SB 1047. “We’re proud to have worked with Senator Wiener to help bring industry to the table and develop practical safeguards that create real accountability for how powerful AI systems are developed and deployed, which will in turn keep everyone safer as the rapid acceleration of AI capabilities continues,” wrote Jack Clark, co-founder and head of policy for Anthropic.[aside postID=news_12052617 hero='https://cdn.kqed.org/wp-content/uploads/sites/10/2025/08/GETTYIMAGES-2228237489-KQED.jpg']Federal lawmakers on both sides of the aisle have historically taken a relatively\u003ca href=\"https://www.kqed.org/news/11905230/do-federal-lawmakers-have-the-stomach-to-rein-in-big-tech\"> light touch\u003c/a> toward regulating the technology industry. Despite high-drama hearings about troubling trends in social media and now AI, few bills make it out of their respective committees, let alone to a floor vote. 
“While federal standards remain essential to avoid a patchwork of state regulations, California has created a strong framework that balances public safety with continued innovation,” Clark added.\u003c/p>\n\u003cp>This time around, other AI developers got behind Wiener’s effort. “Meta supports balanced AI regulation and the California Frontier AI law is a positive step in that direction,” a spokesperson for Meta wrote in a statement.\u003c/p>\n\u003cp>Earlier this year, a coalition of more than 20 tech and youth safety advocacy organizations\u003ca href=\"https://encodeai.org/wp-content/uploads/2025/09/SB-53-Coalition-Letter-9_24_2025.pdf\"> sent a letter\u003c/a> to Gov. Newsom in support of SB 53. “If basic guardrails like this had existed at the inception of social media, our children could be living in a safer, healthier world,” the letter said.\u003c/p>\n\u003cp>“We are incredibly proud to have worked with Senator Wiener and Governor Newsom on this AI safety legislation,” wrote Sneha Revanur, founder of Encode AI, a youth-led nonprofit that pushes for responsible AI through policy. The group was one of the primary drivers behind that coalition. “Frontier AI models have immense potential but without proper oversight, they can create real risks and harms. California has shown it’s possible to lead on AI safety without stifling progress.”\u003c/p>\n\u003cp>The bill was opposed by business and industry representatives, including the California Chamber of Commerce, TechNet and the Silicon Valley Leadership Group.\u003c/p>\n\u003cp>“It’s vital that we strengthen California’s role as the global leader in AI and the epicenter of innovation. SVLG is committed to advocating for policies that seek to responsibly scale this transformative technology at this pivotal juncture and to unleash a new wave of innovation and growth,” Ahmad Thomas, CEO of Silicon Valley Leadership Group, wrote in a statement. “We will continue to work with the Governor and leaders in the Legislature to ensure that new laws and regulations don’t impose undue burdens on the most innovative companies in the world.”\u003c/p>\n\u003cp> \u003c/p>\n\u003cp>\u003c/p>\n",
"blocks": [],
"excerpt": "Gov. Gavin Newsom signed State Senator Scott Wiener’s SB 53, which aims to put safety guardrails on AI development while not squashing the growing AI industry. ",
"status": "publish",
"parent": 0,
"modified": 1759246357,
"stats": {
"hasAudio": false,
"hasVideo": false,
"hasChartOrMap": false,
"iframeSrcs": [],
"hasGoogleForm": false,
"hasGallery": false,
"hasHearkenModule": false,
"hasPolis": false,
"paragraphCount": 14,
"wordCount": 838
},
"headData": {
"title": "Newsom Signs California AI Transparency Bill Tailored to Meet Tech Industry Tastes | KQED",
"description": "Gov. Gavin Newsom signed State Senator Scott Wiener’s SB 53, which aims to put safety guardrails on AI development while not squashing the growing AI industry. ",
"ogTitle": "",
"ogDescription": "",
"ogImgId": "",
"twTitle": "",
"twDescription": "",
"twImgId": "",
"schema": {
"@context": "https://schema.org",
"@type": "NewsArticle",
"headline": "Newsom Signs California AI Transparency Bill Tailored to Meet Tech Industry Tastes",
"datePublished": "2025-09-29T14:59:38-07:00",
"dateModified": "2025-09-30T08:32:37-07:00",
"image": "https://cdn.kqed.org/wp-content/uploads/2020/02/KQED-OG-Image@1x.png",
"isAccessibleForFree": "True",
"publisher": {
"@type": "NewsMediaOrganization",
"@id": "https://www.kqed.org/#organization",
"name": "KQED",
"logo": "https://cdn.kqed.org/wp-content/uploads/2020/02/KQED-OG-Image@1x.png",
"url": "https://www.kqed.org",
"sameAs": [
"https://www.facebook.com/KQED",
"https://twitter.com/KQED",
"https://www.instagram.com/kqed/",
"https://www.tiktok.com/@kqedofficial",
"https://www.linkedin.com/company/kqed",
"https://www.youtube.com/channel/UCeC0IOo7i1P_61zVUWbJ4nw"
]
}
}
},
"primaryCategory": {
"termId": 8,
"slug": "news",
"name": "News"
},
"sticky": false,
"nprStoryId": "kqed-12058013",
"templateType": "standard",
"featuredImageType": "standard",
"excludeFromSiteSearch": "Include",
"articleAge": "0",
"path": "/news/12058013/newsom-signs-california-ai-transparency-bill-tailored-to-meet-tech-industry-tastes",
"audioTrackLength": null,
"parsedContent": [
{
"type": "contentString",
"content": "\u003cdiv class=\"post-body\">\u003cp>\u003cp>Gov. Gavin Newsom today \u003ca href=\"https://www.gov.ca.gov/2025/09/29/governor-newsom-signs-sb-53-advancing-californias-world-leading-artificial-intelligence-industry/\">signed\u003c/a> into law\u003ca href=\"https://leginfo.legislature.ca.gov/faces/billNavClient.xhtml?bill_id=202520260SB53\"> Senate Bill 53\u003c/a>, which would require large model developers like Anthropic and Open AI to be transparent about safety measures they put in place to prevent catastrophic events. The legislation would also create CalCompute, a public cloud infrastructure that expands access to AI resources for researchers, startups and public institutions.\u003c/p>\n\u003cp>In announcing his decision, Newsom wrote, “California has proven that we can establish regulations to protect our communities while also ensuring that the growing AI industry continues to thrive. This legislation strikes that balance.”\u003c/p>\n\u003cp>Senator Scott Wiener (D-San Francisco) authored the bill, after his original effort became target No. 1 for Silicon Valley lobbyists\u003ca href=\"https://www.kqed.org/news/12007323/can-california-still-lead-on-ai-regulation-following-newsoms-veto-of-ai-safety-bill\"> last legislative session\u003c/a> and died on Newsom’s desk. That bill spooked high-profile California politicians, including\u003ca href=\"https://www.kqed.org/news/12002254/california-bill-to-regulate-catastrophic-effects-of-ai-heads-to-newsoms-desk\"> Nancy Pelosi,\u003c/a> nervous about getting on the wrong side of Big Tech. In last year’s veto message for SB 1047, Newsom announced a working group on AI, which helped lay the groundwork for \u003ca href=\"https://www.kqed.org/news/12020857/california-lawmaker-ready-revive-fight-regulating-ai\">SB 53\u003c/a>.\u003c/p>\n\u003cp>\u003c/p>\u003c/div>",
"attributes": {
"named": {},
"numeric": []
}
},
{
"type": "component",
"content": "",
"name": "ad",
"attributes": {
"named": {
"label": "fullwidth"
},
"numeric": [
"fullwidth"
]
}
},
{
"type": "contentString",
"content": "\u003cdiv class=\"post-body\">\u003cp>\u003c/p>\n\u003cp>“With a technology as transformative as AI, we have a responsibility to support that innovation while putting in place commonsense guardrails to understand and reduce risk,” \u003ca href=\"https://sd11.senate.ca.gov/news/governor-newsom-signs-senator-wieners-landmark-ai-law-set-commonsense-guardrails-boost\">wrote\u003c/a> Wiener. “I’m grateful to the Governor for his leadership in convening the Joint California AI Policy Working Group, working with us to refine the legislation, and now signing it into law.”\u003c/p>\n\u003cp>The working group issued its\u003ca href=\"https://www.gov.ca.gov/wp-content/uploads/2025/06/June-17-2025-%E2%80%93-The-California-Report-on-Frontier-AI-Policy.pdf\"> report\u003c/a> in June, calling on lawmakers to pass transparency requirements and whistleblower protections, declaring that California has the “responsibility” to ensure the safety of generative artificial intelligence software, “so that their benefit to society can be realized.”\u003c/p>\n\u003cfigure id=\"attachment_12058035\" class=\"wp-caption aligncenter\" style=\"max-width: 2560px\">\u003ca href=\"https://cdn.kqed.org/wp-content/uploads/sites/10/2025/09/GettyImages-2159671948-scaled.jpg\">\u003cimg loading=\"lazy\" decoding=\"async\" class=\"size-full wp-image-12058035\" src=\"https://cdn.kqed.org/wp-content/uploads/sites/10/2025/09/GettyImages-2159671948-scaled.jpg\" alt=\"\" width=\"2560\" height=\"1707\" srcset=\"https://cdn.kqed.org/wp-content/uploads/sites/10/2025/09/GettyImages-2159671948-scaled.jpg 2560w, https://cdn.kqed.org/wp-content/uploads/sites/10/2025/09/GettyImages-2159671948-2000x1333.jpg 2000w, https://cdn.kqed.org/wp-content/uploads/sites/10/2025/09/GettyImages-2159671948-160x107.jpg 160w, https://cdn.kqed.org/wp-content/uploads/sites/10/2025/09/GettyImages-2159671948-1536x1024.jpg 1536w, https://cdn.kqed.org/wp-content/uploads/sites/10/2025/09/GettyImages-2159671948-2048x1365.jpg 2048w\" sizes=\"auto, (max-width: 2560px) 100vw, 2560px\">\u003c/a>\u003cfigcaption class=\"wp-caption-text\">Close-up of phone screen displaying Anthropic Claude, a Large Language Model (LLM) powered generative artificial intelligence chatbot, in Lafayette, California, June 27, 2024. \u003ccite>(Photo by Smith Collection/Gado/Getty Images)\u003c/cite>\u003c/figcaption>\u003c/figure>\n\u003cp>The report noted that AI systems have been observed finding loopholes that allow them to behave in ways their programmers did not intend. Also, that competitive pressures are undermining safety, and policy intervention is needed to prevent a race to the bottom.\u003c/p>\n\u003cp>Anthropic, which makes the chatbot Claude, was the first major AI developer to endorse SB 53, having offered more cautious support for SB 1047. “We’re proud to have worked with Senator Wiener to help bring industry to the table and develop practical safeguards that create real accountability for how powerful AI systems are developed and deployed, which will in turn keep everyone safer as the rapid acceleration of AI capabilities continues,” wrote Jack Clark, co-founder and head of policy for Anthropic.\u003c/p>\u003c/div>",
"attributes": {
"named": {},
"numeric": []
}
},
{
"type": "component",
"content": "",
"name": "aside",
"attributes": {
"named": {
"postid": "news_12052617",
"hero": "https://cdn.kqed.org/wp-content/uploads/sites/10/2025/08/GETTYIMAGES-2228237489-KQED.jpg",
"label": ""
},
"numeric": []
}
},
{
"type": "contentString",
"content": "\u003cdiv class=\"post-body\">\u003cp>Federal lawmakers on both sides of the aisle have historically taken a relatively\u003ca href=\"https://www.kqed.org/news/11905230/do-federal-lawmakers-have-the-stomach-to-rein-in-big-tech\"> light touch\u003c/a> toward regulating the technology industry. Despite high-drama hearings about troubling trends in social media and now AI, few bills make it out of their respective committees, let alone to a floor vote. “While federal standards remain essential to avoid a patchwork of state regulations, California has created a strong framework that balances public safety with continued innovation,” Clark added.\u003c/p>\n\u003cp>This time around, other AI developers got behind Wiener’s effort. “Meta supports balanced AI regulation and the California Frontier AI law is a positive step in that direction,” a spokesperson for Meta wrote in a statement.\u003c/p>\n\u003cp>Earlier this year, a coalition of more than 20 tech and youth safety advocacy organizations\u003ca href=\"https://encodeai.org/wp-content/uploads/2025/09/SB-53-Coalition-Letter-9_24_2025.pdf\"> sent a letter\u003c/a> to Gov. Newsom in support of SB 53. “If basic guardrails like this had existed at the inception of social media, our children could be living in a safer, healthier world,” the letter said.\u003c/p>\n\u003cp>“We are incredibly proud to have worked with Senator Wiener and Governor Newsom on this AI safety legislation,” wrote Sneha Revanur, founder of Encode AI, a youth-led nonprofit that pushes for responsible AI through policy. The group was one of the primary drivers behind that coalition. “Frontier AI models have immense potential but without proper oversight, they can create real risks and harms. California has shown it’s possible to lead on AI safety without stifling progress.”\u003c/p>\n\u003cp>The bill was opposed by business and industry representatives, including the California Chamber of Commerce, TechNet and Silicon Valley Leadership Group and TechNet.\u003c/p>\n\u003cp>“It’s vital that we strengthen California’s role as the global leader in AI and the epicenter of innovation. SVLG is committed to advocating for policies that seek to responsibly scale this transformative technology at this pivotal juncture and to unleash a new wave of innovation and growth,” Ahmad Thomas, CEO of Silicon Valley Leadership Group, wrote in a statement. “We will continue to work with the Governor and leaders in the Legislature to ensure that new laws and regulations don’t impose undue burdens on the most innovative companies in the world.”\u003c/p>\n\u003cp> \u003c/p>\n\u003cp>\u003c/p>\n\u003c/div>\u003c/p>",
"attributes": {
"named": {},
"numeric": []
}
}
],
"link": "/news/12058013/newsom-signs-california-ai-transparency-bill-tailored-to-meet-tech-industry-tastes",
"authors": [
"251"
],
"categories": [
"news_31795",
"news_28250",
"news_8",
"news_13",
"news_248"
],
"tags": [
"news_25184",
"news_32664",
"news_34755",
"news_18538",
"news_22307",
"news_32668",
"news_27626",
"news_16",
"news_34586",
"news_1631"
],
"featImg": "news_12051438",
"label": "news"
},
"news_12057720": {
"type": "posts",
"id": "news_12057720",
"meta": {
"index": "posts_1716263798",
"site": "news",
"id": "12057720",
"score": null,
"sort": [
1758913476000
]
},
"guestAuthors": [],
"slug": "nasa-mission-with-help-from-uc-berkeley-to-explore-earths-exosphere",
"title": "NASA Mission, With Help From UC Berkeley, To Explore Earth's Exosphere",
"publishDate": 1758913476,
"format": "audio",
"headTitle": "NASA Mission, With Help From UC Berkeley, To Explore Earth’s Exosphere | KQED",
"labelTerm": {},
"content": "\u003cp>\u003cb>Here are the morning’s top stories on Friday, September 26, 2025…\u003c/b>\u003c/p>\n\u003cul>\n\u003cli style=\"font-weight: 400\">\u003cspan style=\"font-weight: 400\">This week a SpaceX Falcon9 rocket launched from Florida – the mission? To study the outermost parts of the earth’s atmosphere. The mission is being \u003c/span>\u003ca href=\"https://www.ssl.berkeley.edu/earth-geospace/carruthers-observatory-fact-sheet/\">\u003cspan style=\"font-weight: 400\">steered by remote control from UC Berkeley’s Space Sciences Laboratory. \u003c/span>\u003c/a>\u003c/li>\n\u003cli style=\"font-weight: 400\">\u003cspan style=\"font-weight: 400\">A \u003c/span>\u003ca href=\"https://laist.com/news/climate-environment/l-a-county-report-about-eaton-and-palisades-fires-finds-failings-in-emergency-response\">\u003cspan style=\"font-weight: 400\">report out this week\u003c/span>\u003c/a>\u003cspan style=\"font-weight: 400\"> details major failures in LA County’s response to the January firestorms.\u003c/span>\u003c/li>\n\u003cli style=\"font-weight: 400\">\u003cspan style=\"font-weight: 400\">Immigration agents \u003c/span>\u003ca href=\"https://www.kvcrnews.org/local-news/2025-09-25/federal-immigration-agents-arrest-three-workers-at-pomona-day-labor-center\">\u003cspan style=\"font-weight: 400\">arrested three day laborers\u003c/span>\u003c/a>\u003cspan style=\"font-weight: 400\"> outside of a well-known worker center in Pomona Thursday morning. Immigration advocates say they’re worried about the health and safety of the people now being detained. \u003c/span>\u003c/li>\n\u003cli>A Southern California attorney is \u003ca href=\"https://calmatters.org/economy/technology/2025/09/chatgpt-lawyer-fine-ai-regulation/\">facing a historic fine\u003c/a> for filing a state court appeal full of fake quotations generated by the artificial intelligence tool ChatGPT.\u003c/li>\n\u003c/ul>\n\u003ch2>\u003cstrong>Space Mission Aims To Study Outermost Parts Of Earth’s Atmosphere\u003c/strong>\u003c/h2>\n\u003cp>A SpaceX Falcon 9 \u003ca href=\"https://science.nasa.gov/science-research/heliophysics/upcoming-launch-to-boost-nasas-study-of-suns-influence-across-space/\">launched from Kennedy Space Center\u003c/a> in Florida this week. The Carruthers mission is named after Dr. George Carruthers, creator of the Moon-based telescope that captured the first images of Earth’s exosphere. The project will look to expand his work by charting changes in the outermost parts of our atmosphere.\u003c/p>\n\u003cp>The mission is being steered from \u003ca href=\"https://www.ssl.berkeley.edu/earth-geospace/carruthers-observatory-fact-sheet/\">UC Berkeley’s Space Science Laboratory.\u003c/a> Abhi Tripathi is director of mission operations for the lab. He said there are only a few windows throughout the day where operators can receive information from the spacecraft and send instructions back. “If something flashes red, we have to quickly diagnose what is the issue and then figure out what we want to do to get it back within limits,” he said.\u003c/p>\n\u003cp>The mission’s vantage point is expected to offer a complete view of the exosphere that is not visible from the Moon. Lindy Elkins-Tanton is the director of the Berkeley laboratory. She said it’s fundamental because the exosphere is where satellites orbit and right now, we don’t know very well how to protect them from violent space weather coming from the sun. 
“The kind of information this mission is gathering is good for every person because of the need that we have for the instrumental security of our satellites around the Earth that we all use literally every day in ways that we don’t even notice,” she said.\u003c/p>\n\u003ch2>\u003ca href=\"https://laist.com/news/climate-environment/l-a-county-report-about-eaton-and-palisades-fires-finds-failings-in-emergency-response\">\u003cstrong>Independent Review Finds ‘Outdated’ Policies Hampered Response To LA Fires\u003c/strong>\u003c/a>\u003c/h2>\n\u003cp>An\u003ca class=\"Link\" href=\"https://file.lacounty.gov/SDSInter/bos/supdocs/207915.pdf?utm_content=&utm_medium=email&utm_name=&utm_source=govdelivery&utm_term=\" target=\"_blank\" rel=\"noopener\" data-cms-ai=\"0\"> \u003cu>after-action report\u003c/u>\u003c/a> released Thursday about the\u003ca class=\"Link\" href=\"https://laist.com/news/climate-environment/los-angeles-wildfire-recovery-plan-eaton-palisades-southern-california-fires\" target=\"_blank\" rel=\"noopener\" data-cms-ai=\"0\"> \u003cu>Eaton and Palisades fires\u003c/u>\u003c/a> details how the unprecedented January firestorms unfolded and the failings of L.A. County’s emergency response. It also lays out recommendations for changes and reforms.\u003c/p>\n\u003cp>[ad fullwidth]\u003c/p>\n\u003cp>At a news conference Thursday, county leaders and one of the report’s authors repeatedly emphasized that there was no single point of failure that led to the deaths of 31 people and devastation, but rather, a number of failures caused by a lack of preparation, coordination and resources. Those shortcomings were amplified by extreme winds and fire behavior.\u003c/p>\n\u003cp>The report — compiled by the\u003ca class=\"Link\" href=\"https://www.mcchrystalgroup.com/about\" target=\"_blank\" rel=\"noopener\" data-cms-ai=\"0\"> \u003cu>McChrystal Group\u003c/u>\u003c/a> and released more than eight months after the fires — found that county practices around issuing emergency alerts, specifically evacuations, are “outdated, unclear and contradictory.” That, coupled with confusion about who has what authority around evacuation decision-making, “led to inconsistencies in preparedness strategies across the county and a lack of clear documentation and communication processes,” the report said. In addition, the report said, first responders using a variety of unconnected platforms and inconsistent practices struggled to share information in real-time.\u003c/p>\n\u003cp>“The extreme and rapidly moving fire conditions challenged the situational awareness of fire and law enforcement first responders,” the report said, “making it difficult to communicate the fire’s location to the public. This was especially prevalent during the Eaton Fire, when wind conditions grounded aerial resources, including surveillance, almost immediately after the fire started.”\u003c/p>\n\u003cp>The report recommends restructuring and increasing staffing at the Office of Emergency Management, updating emergency preparedness training and policies and upgrading obsolete systems, as well as investing in public education about emergencies. County officials repeatedly said Thursday that they were committed to making the changes necessary, and have already begun to do so in some cases. 
Including, exploring new incident management systems.\u003c/p>\n\u003ch2 class=\"ArtP-headline\">\u003ca href=\"https://www.kvcrnews.org/local-news/2025-09-25/federal-immigration-agents-arrest-three-workers-at-pomona-day-labor-center\">\u003cstrong>Federal Immigration Agents Arrest Three Workers At Pomona Day Labor Center\u003c/strong>\u003c/a>\u003c/h2>\n\u003cp>Immigration agents arrested three day laborers outside of a well-known worker center in Pomona on Thursday. Immigration advocates say they’re worried about the health and safety of the people now being detained.\u003c/p>\n\u003cp>Staff at the Pomona Day Labor Center say federal agents showed up around 9 a.m. to their site located next to a Contractors’ Warehouse store on Mission Boulevard. A video obtained by KVCR shows agents surrounding two workers inside the parking lot and taking them into custody. One man’s identity remains unknown, but another is believed to be Fernando Salazar, a Mexican worker who regularly visits the center, said Alexis Teodoro, worker-rights director at the Pomona Economic Opportunity Center. “When we looked up, we were extremely surprised to see the agents,” he said. “I immediately started questioning the agents to identify themselves.”\u003c/p>\n\u003cp>Agents ignored repeated demands from witnesses and PEOC staff to leave the property. Video shows Teodoro and other staffers urging them to produce a warrant. A separate Ring camera video captures a third worker sprinting into a nearby neighborhood. Teodoro identified him as Benjamin Alcocer, who was later detained.\u003c/p>\n\u003cp>Pomona has experienced several incidents involving ICE and Border Patrol since April, which PEOC said resulted in several detentions and deportations. Federal immigration arrests also took place in nearby Chino Hills and Montclair on Thursday, according to immigrant rights organizations.\u003c/p>\n\u003ch2 class=\"entry-title \">\u003ca href=\"https://calmatters.org/economy/technology/2025/09/chatgpt-lawyer-fine-ai-regulation/\">\u003cstrong>California Issues Historic Fine Over Lawyer’s ChatGPT Fabrications\u003c/strong>\u003c/a>\u003c/h2>\n\u003cp>A California attorney must pay a $10,000 fine for filing a state court appeal full of fake quotations generated by the artificial intelligence tool ChatGPT.\u003c/p>\n\u003cp>The fine appears to be the largest issued over AI fabrications by a California court and came with a \u003ca href=\"https://www4.courts.ca.gov/opinions/documents/B331918.PDF\" target=\"_blank\" rel=\"noreferrer noopener\">blistering opinion\u003c/a> stating that 21 of 23 quotes from cases cited in the attorney’s opening brief were made up. It also noted that numerous out-of-state and federal courts have confronted attorneys for citing fake legal authority.\u003c/p>\n\u003cp>The opinion from California’s 2nd District Court of Appeal is a clear example of why the state’s legal authorities are scrambling to regulate the use of AI in the judiciary. Earlier this month, the state’s Judicial Council \u003ca href=\"https://courts.ca.gov/cms/rules/index/standards/Standard10_80\" target=\"_blank\" rel=\"noreferrer noopener\">issued guidelines requiring judges and court staff\u003c/a> to either ban generative AI or adopt a generative AI use policy by Dec. 15. 
Meanwhile, the California Bar Association is considering whether to strengthen its code of conduct to account for various forms of AI following a request by the California Supreme Court last month.\u003c/p>\n\u003cp>[ad floatright]\u003c/p>\n\u003cp>The Los Angeles-area attorney who was fined, Amir Mostafavi, told the court that he did not read text generated by the AI model before submitting the appeal in July 2023, months after OpenAI marketed ChatGPT as capable of \u003ca href=\"https://law.stanford.edu/2023/04/19/gpt-4-passes-the-bar-exam-what-that-means-for-artificial-intelligence-tools-in-the-legal-industry/\" target=\"_blank\" rel=\"noreferrer noopener\">passing the bar exam\u003c/a>. A three-judge panel fined him for filing a frivolous appeal, violating court rules, citing fake cases, and wasting the court’s time and taxpayers’ money, according to the opinion. Mostafavi told CalMatters he wrote the appeal and then used ChatGPT to try to improve it. He said that he didn’t know it would add case citations or make things up. He thinks it is unrealistic to expect lawyers to stop using AI. It has become an important tool, much as online databases largely replaced law libraries, and until AI systems stop hallucinating fake information, he suggests that lawyers who use AI proceed with caution.\u003c/p>\n\n",
"blocks": [],
"excerpt": "The mission is being steered by remote control from UC Berkeley's Space Sciences Lab. ",
"status": "publish",
"parent": 0,
"modified": 1758913476,
"stats": {
"hasAudio": false,
"hasVideo": false,
"hasChartOrMap": false,
"iframeSrcs": [],
"hasGoogleForm": false,
"hasGallery": false,
"hasHearkenModule": false,
"hasPolis": false,
"paragraphCount": 19,
"wordCount": 1335
},
"headData": {
"title": "NASA Mission, With Help From UC Berkeley, To Explore Earth's Exosphere | KQED",
"description": "The mission is being steered by remote control from UC Berkeley's Space Sciences Lab. ",
"ogTitle": "",
"ogDescription": "",
"ogImgId": "",
"twTitle": "",
"twDescription": "",
"twImgId": "",
"schema": {
"@context": "https://schema.org",
"@type": "NewsArticle",
"headline": "NASA Mission, With Help From UC Berkeley, To Explore Earth's Exosphere",
"datePublished": "2025-09-26T12:04:36-07:00",
"dateModified": "2025-09-26T12:04:36-07:00",
"image": "https://cdn.kqed.org/wp-content/uploads/2020/02/KQED-OG-Image@1x.png",
"isAccessibleForFree": "True",
"publisher": {
"@type": "NewsMediaOrganization",
"@id": "https://www.kqed.org/#organization",
"name": "KQED",
"logo": "https://cdn.kqed.org/wp-content/uploads/2020/02/KQED-OG-Image@1x.png",
"url": "https://www.kqed.org",
"sameAs": [
"https://www.facebook.com/KQED",
"https://twitter.com/KQED",
"https://www.instagram.com/kqed/",
"https://www.tiktok.com/@kqedofficial",
"https://www.linkedin.com/company/kqed",
"https://www.youtube.com/channel/UCeC0IOo7i1P_61zVUWbJ4nw"
]
}
}
},
"primaryCategory": {
"termId": 33520,
"slug": "podcast",
"name": "Podcast"
},
"source": "The California Report",
"sourceUrl": "https://www.kqed.org/news/tag/tcrarchive/",
"audioUrl": "https://www.podtrac.com/pts/redirect.mp3/chrt.fm/track/G6C7C3/traffic.megaphone.fm/KQINC1808550927.mp3?updated=1758894815",
"sticky": false,
"nprStoryId": "kqed-12057720",
"templateType": "standard",
"featuredImageType": "standard",
"excludeFromSiteSearch": "Include",
"articleAge": "0",
"path": "/news/12057720/nasa-mission-with-help-from-uc-berkeley-to-explore-earths-exosphere",
"audioTrackLength": null,
"parsedContent": [
{
"type": "contentString",
"content": "\u003cdiv class=\"post-body\">\u003cp>\u003cp>\u003cb>Here are the morning’s top stories on Friday, September 26, 2025…\u003c/b>\u003c/p>\n\u003cul>\n\u003cli style=\"font-weight: 400\">\u003cspan style=\"font-weight: 400\">This week a SpaceX Falcon9 rocket launched from Florida – the mission? To study the outermost parts of the earth’s atmosphere. The mission is being \u003c/span>\u003ca href=\"https://www.ssl.berkeley.edu/earth-geospace/carruthers-observatory-fact-sheet/\">\u003cspan style=\"font-weight: 400\">steered by remote control from UC Berkeley’s Space Sciences Laboratory. \u003c/span>\u003c/a>\u003c/li>\n\u003cli style=\"font-weight: 400\">\u003cspan style=\"font-weight: 400\">A \u003c/span>\u003ca href=\"https://laist.com/news/climate-environment/l-a-county-report-about-eaton-and-palisades-fires-finds-failings-in-emergency-response\">\u003cspan style=\"font-weight: 400\">report out this week\u003c/span>\u003c/a>\u003cspan style=\"font-weight: 400\"> details major failures in LA County’s response to the January firestorms.\u003c/span>\u003c/li>\n\u003cli style=\"font-weight: 400\">\u003cspan style=\"font-weight: 400\">Immigration agents \u003c/span>\u003ca href=\"https://www.kvcrnews.org/local-news/2025-09-25/federal-immigration-agents-arrest-three-workers-at-pomona-day-labor-center\">\u003cspan style=\"font-weight: 400\">arrested three day laborers\u003c/span>\u003c/a>\u003cspan style=\"font-weight: 400\"> outside of a well-known worker center in Pomona Thursday morning. Immigration advocates say they’re worried about the health and safety of the people now being detained. \u003c/span>\u003c/li>\n\u003cli>A Southern California attorney is \u003ca href=\"https://calmatters.org/economy/technology/2025/09/chatgpt-lawyer-fine-ai-regulation/\">facing a historic fine\u003c/a> for filing a state court appeal full of fake quotations generated by the artificial intelligence tool ChatGPT.\u003c/li>\n\u003c/ul>\n\u003ch2>\u003cstrong>Space Mission Aims To Study Outermost Parts Of Earth’s Atmosphere\u003c/strong>\u003c/h2>\n\u003cp>A SpaceX Falcon 9 \u003ca href=\"https://science.nasa.gov/science-research/heliophysics/upcoming-launch-to-boost-nasas-study-of-suns-influence-across-space/\">launched from Kennedy Space Center\u003c/a> in Florida this week. The Carruthers mission is named after Dr. George Carruthers, creator of the Moon-based telescope that captured the first images of Earth’s exosphere. The project will look to expand his work by charting changes in the outermost parts of our atmosphere.\u003c/p>\n\u003cp>The mission is being steered from \u003ca href=\"https://www.ssl.berkeley.edu/earth-geospace/carruthers-observatory-fact-sheet/\">UC Berkeley’s Space Science Laboratory.\u003c/a> Abhi Tripathi is director of mission operations for the lab. He said there are only a few windows throughout the day where operators can receive information from the spacecraft and send instructions back. “If something flashes red, we have to quickly diagnose what is the issue and then figure out what we want to do to get it back within limits,” he said.\u003c/p>\n\u003cp>The mission’s vantage point is expected to offer a complete view of the exosphere that is not visible from the Moon. Lindy Elkins-Tanton is the director of the Berkeley laboratory. She said it’s fundamental because the exosphere is where satellites orbit and right now, we don’t know very well how to protect them from violent space weather coming from the sun. 
“The kind of information this mission is gathering is good for every person because of the need that we have for the instrumental security of our satellites around the Earth that we all use literally every day in ways that we don’t even notice,” she said.\u003c/p>\n\u003ch2>\u003ca href=\"https://laist.com/news/climate-environment/l-a-county-report-about-eaton-and-palisades-fires-finds-failings-in-emergency-response\">\u003cstrong>Independent Review Finds ‘Outdated’ Policies Hampered Response To LA Fires\u003c/strong>\u003c/a>\u003c/h2>\n\u003cp>An\u003ca class=\"Link\" href=\"https://file.lacounty.gov/SDSInter/bos/supdocs/207915.pdf?utm_content=&utm_medium=email&utm_name=&utm_source=govdelivery&utm_term=\" target=\"_blank\" rel=\"noopener\" data-cms-ai=\"0\"> \u003cu>after-action report\u003c/u>\u003c/a> released Thursday about the\u003ca class=\"Link\" href=\"https://laist.com/news/climate-environment/los-angeles-wildfire-recovery-plan-eaton-palisades-southern-california-fires\" target=\"_blank\" rel=\"noopener\" data-cms-ai=\"0\"> \u003cu>Eaton and Palisades fires\u003c/u>\u003c/a> details how the unprecedented January firestorms unfolded and the failings of L.A. County’s emergency response. It also lays out recommendations for changes and reforms.\u003c/p>\n\u003cp>\u003c/p>\u003c/div>",
"attributes": {
"named": {},
"numeric": []
}
},
{
"type": "component",
"content": "",
"name": "ad",
"attributes": {
"named": {
"label": "fullwidth"
},
"numeric": [
"fullwidth"
]
}
},
{
"type": "contentString",
"content": "\u003cdiv class=\"post-body\">\u003cp>\u003c/p>\n\u003cp>At a news conference Thursday, county leaders and one of the report’s authors repeatedly emphasized that there was no single point of failure that led to the deaths of 31 people and devastation, but rather, a number of failures caused by a lack of preparation, coordination and resources. Those shortcomings were amplified by extreme winds and fire behavior.\u003c/p>\n\u003cp>The report — compiled by the\u003ca class=\"Link\" href=\"https://www.mcchrystalgroup.com/about\" target=\"_blank\" rel=\"noopener\" data-cms-ai=\"0\"> \u003cu>McChrystal Group\u003c/u>\u003c/a> and released more than eight months after the fires — found that county practices around issuing emergency alerts, specifically evacuations, are “outdated, unclear and contradictory.” That, coupled with confusion about who has what authority around evacuation decision-making, “led to inconsistencies in preparedness strategies across the county and a lack of clear documentation and communication processes,” the report said. In addition, the report said, first responders using a variety of unconnected platforms and inconsistent practices struggled to share information in real-time.\u003c/p>\n\u003cp>“The extreme and rapidly moving fire conditions challenged the situational awareness of fire and law enforcement first responders,” the report said, “making it difficult to communicate the fire’s location to the public. This was especially prevalent during the Eaton Fire, when wind conditions grounded aerial resources, including surveillance, almost immediately after the fire started.”\u003c/p>\n\u003cp>The report recommends restructuring and increasing staffing at the Office of Emergency Management, updating emergency preparedness training and policies and upgrading obsolete systems, as well as investing in public education about emergencies. County officials repeatedly said Thursday that they were committed to making the changes necessary, and have already begun to do so in some cases. Including, exploring new incident management systems.\u003c/p>\n\u003ch2 class=\"ArtP-headline\">\u003ca href=\"https://www.kvcrnews.org/local-news/2025-09-25/federal-immigration-agents-arrest-three-workers-at-pomona-day-labor-center\">\u003cstrong>Federal Immigration Agents Arrest Three Workers At Pomona Day Labor Center\u003c/strong>\u003c/a>\u003c/h2>\n\u003cp>Immigration agents arrested three day laborers outside of a well-known worker center in Pomona on Thursday. Immigration advocates say they’re worried about the health and safety of the people now being detained.\u003c/p>\n\u003cp>Staff at the Pomona Day Labor Center say federal agents showed up around 9 a.m. to their site located next to a Contractors’ Warehouse store on Mission Boulevard. A video obtained by KVCR shows agents surrounding two workers inside the parking lot and taking them into custody. One man’s identity remains unknown, but another is believed to be Fernando Salazar, a Mexican worker who regularly visits the center, said Alexis Teodoro, worker-rights director at the Pomona Economic Opportunity Center. “When we looked up, we were extremely surprised to see the agents,” he said. “I immediately started questioning the agents to identify themselves.”\u003c/p>\n\u003cp>Agents ignored repeated demands from witnesses and PEOC staff to leave the property. Video shows Teodoro and other staffers urging them to produce a warrant. A separate Ring camera video captures a third worker sprinting into a nearby neighborhood. 
Teodoro identified him as Benjamin Alcocer, who was later detained.\u003c/p>\n\u003cp>Pomona has experienced several incidents involving ICE and Border Patrol since April, which PEOC said resulted in several detentions and deportations. Federal immigration arrests also took place in nearby Chino Hills and Montclair on Thursday, according to immigrant rights organizations.\u003c/p>\n\u003ch2 class=\"entry-title \">\u003ca href=\"https://calmatters.org/economy/technology/2025/09/chatgpt-lawyer-fine-ai-regulation/\">\u003cstrong>California Issues Historic Fine Over Lawyer’s ChatGPT Fabrications\u003c/strong>\u003c/a>\u003c/h2>\n\u003cp>A California attorney must pay a $10,000 fine for filing a state court appeal full of fake quotations generated by the artificial intelligence tool ChatGPT.\u003c/p>\n\u003cp>The fine appears to be the largest issued over AI fabrications by a California court and came with a \u003ca href=\"https://www4.courts.ca.gov/opinions/documents/B331918.PDF\" target=\"_blank\" rel=\"noreferrer noopener\">blistering opinion\u003c/a> stating that 21 of 23 quotes from cases cited in the attorney’s opening brief were made up. It also noted that numerous out-of-state and federal courts have confronted attorneys for citing fake legal authority.\u003c/p>\n\u003cp>The opinion from California’s 2nd District Court of Appeal is a clear example of why the state’s legal authorities are scrambling to regulate the use of AI in the judiciary. Earlier this month, the state’s Judicial Council \u003ca href=\"https://courts.ca.gov/cms/rules/index/standards/Standard10_80\" target=\"_blank\" rel=\"noreferrer noopener\">issued guidelines requiring judges and court staff\u003c/a> to either ban generative AI or adopt a generative AI use policy by Dec. 15. Meanwhile, the California Bar Association is considering whether to strengthen its code of conduct to account for various forms of AI following a request by the California Supreme Court last month.\u003c/p>\n\u003cp>\u003c/p>\u003c/div>",
"attributes": {
"named": {},
"numeric": []
}
},
{
"type": "component",
"content": "",
"name": "ad",
"attributes": {
"named": {
"label": "floatright"
},
"numeric": [
"floatright"
]
}
},
{
"type": "contentString",
"content": "\u003cdiv class=\"post-body\">\u003cp>\u003c/p>\n\u003cp>The Los Angeles-area attorney who was fined, Amir Mostafavi, told the court that he did not read text generated by the AI model before submitting the appeal in July 2023, months after OpenAI marketed ChatGPT as capable of \u003ca href=\"https://law.stanford.edu/2023/04/19/gpt-4-passes-the-bar-exam-what-that-means-for-artificial-intelligence-tools-in-the-legal-industry/\" target=\"_blank\" rel=\"noreferrer noopener\">passing the bar exam\u003c/a>. A three-judge panel fined him for filing a frivolous appeal, violating court rules, citing fake cases, and wasting the court’s time and the taxpayers money, according to the opinion. Mostafavi told CalMatters he wrote the appeal and then used ChatGPT to try and improve it. He said that he didn’t know it would add case citations or make things up. He thinks it is unrealistic to expect lawyers to stop using AI. It’s become an important tool just as online databases largely replaced law libraries and, until AI systems stop hallucinating fake information, he suggests lawyers who use AI to proceed with caution.\u003c/p>\n\n\u003c/div>\u003c/p>",
"attributes": {
"named": {},
"numeric": []
}
}
],
"link": "/news/12057720/nasa-mission-with-help-from-uc-berkeley-to-explore-earths-exosphere",
"authors": [
"11739"
],
"programs": [
"news_72"
],
"categories": [
"news_33520",
"news_34018"
],
"tags": [
"news_25184",
"news_32668",
"news_35915",
"news_35910",
"news_35916",
"news_34761",
"news_35914",
"news_21998",
"news_21268"
],
"featImg": "news_12057721",
"label": "source_news_12057720"
},
"news_12054490": {
"type": "posts",
"id": "news_12054490",
"meta": {
"index": "posts_1716263798",
"site": "news",
"id": "12054490",
"score": null,
"sort": [
1756983611000
]
},
"guestAuthors": [],
"slug": "child-safety-groups-demand-mental-health-guardrails-after-california-teens-suicide-using-chatgpt",
"title": "Child Safety Groups Demand Mental Health Guardrails, After California Teen’s Suicide Using ChatGPT",
"publishDate": 1756983611,
"format": "standard",
"headTitle": "Child Safety Groups Demand Mental Health Guardrails, After California Teen’s Suicide Using ChatGPT | KQED",
"labelTerm": {
"site": "news"
},
"content": "\u003cp>\u003cem>If you or someone you know is struggling with thoughts of suicide, you can dial or text 988 and be connected to help.\u003cbr>\n\u003c/em>\u003cbr>\nWith its quick, often personable responses, \u003ca href=\"https://www.kqed.org/news/tag/chatgpt\">ChatGPT\u003c/a> can feel to some children more like an available friend than a language model engineered to guess its next word.\u003c/p>\n\u003cp>These blurred lines allow kids to go down “roads they should never go,” warn child safety advocates and tech policy groups, who have called for companies that design chatbots and artificial intelligence companions to take more responsibility for their program’s influence on youth.\u003c/p>\n\u003cp>This week, tech giant \u003ca href=\"https://openai.com/index/building-more-helpful-chatgpt-experiences-for-everyone/\">OpenAI\u003c/a> announced new safety measures for kids. The post didn’t mention 16-year-old Adam Raine, who, according to his parents, killed himself after discussing both his loneliness and plans to harm himself with ChatGPT.\u003c/p>\n\u003cp>[ad fullwidth]\u003c/p>\n\u003cp>According to a lawsuit filed in San Francisco on Aug. 26, Maria and Matt Raine allege that ChatGPT-4o cultivated a psychological dependence in their son by continually encouraging and validating “whatever [he] expressed, including his most harmful and self-destructive thoughts.”\u003c/p>\n\u003cp>“This is an area that calls out for thoughtful common-sense regulation and guardrails. And quite frankly, that the leaders of all the major AI companies need to address,” said Jim Steyer, founder and CEO of Common Sense Media, which advocates safe media use for children.\u003c/p>\n\u003cp>With more than \u003ca href=\"https://mashable.com/article/openai-how-many-people-use-chatgpt\">500 million\u003c/a> weekly ChatGPT users and more than 2.5 billion prompts per day, users are increasingly turning to the large language model for \u003ca href=\"https://www.kqed.org/news/12049674/from-god-to-grief-people-are-asking-ai-the-big-questions-once-reserved-for-clergy\">emotional support.\u003c/a>\u003c/p>\n\u003cp>Both digital assistants like ChatGPT, as well as AI companions like Character.Ai and Replika, told researchers posing as 13-year-olds about drinking and drug use, instructed them on how to conceal eating disorders and even composed a suicide letter to their parents if asked, according to \u003ca href=\"https://www.kqed.org/news/12038154/kids-talking-ai-companion-chatbots-stanford-researchers-say-thats-bad-idea\">research from Stanford University\u003c/a>.[aside postID=news_12053799 hero='https://cdn.kqed.org/wp-content/uploads/sites/10/2024/10/241009-OAKLAND-YOUTH-VOTE-MD-08-KQED-1020x680.jpg']Steyer said OpenAI has partnered with Common Sense Media and has taken the issue more seriously than Meta AI or X’s Grok. But he still recommended that young people under 18 — “AI natives” — be restricted from using chatbots for companionship or therapy, suggesting that enhanced controls may not go far enough.\u003c/p>\n\u003cp>“You can’t just think that parental controls are a be-all end-all solution. They’re hard to use, very easy to bypass for young people, and they put the burden on parents when, honestly, it should be on the tech companies to prevent these kinds of tragic situations,” Steyer said. 
“It’s more like a bandaid when what we need is a long-term cure.”\u003c/p>\n\u003cp>In a blog post on Tuesday, the company shared plans to make the chatbot safer for young people to use in recognition of the fact that “people turn to it in the most difficult of moments.” The changes are set to roll out within the next month, OpenAI said.\u003c/p>\n\u003cp>OpenAI did not immediately respond to a request for comment. But the planned updates promise to link parents’ and teens’ accounts, reroute sensitive conversations with youth and alert parents “when the system detects their teen is in a moment of acute distress.”\u003c/p>\n\u003cp>If a user expresses suicidal ideation, ChatGPT is trained to direct people to seek professional help, OpenAI stated in a\u003ca href=\"https://openai.com/index/helping-people-when-they-need-it-most/\"> post\u003c/a> last week. ChatGPT refers people to 988, the suicide and crisis hotline.\u003c/p>\n\u003cp>The program does not escalate reports of self-harm to law enforcement, “given the uniquely private nature of ChatGPT interactions.” Licensed psychotherapists aren’t universally mandated to report self-harm either, but they must intervene if the client is at immediate risk.\u003c/p>\n\u003cp>Common Sense Media is supporting legislation in California that would establish limits protecting children from AI and social media abuse. AB 56 would implement \u003ca href=\"https://www.kqed.org/news/12017249/california-bill-would-put-tobacco-like-warnings-social-media-apps\">social media warning labels \u003c/a>that clearly state the risks to children, not unlike the labels pasted on tobacco products.\u003c/p>\n\u003cfigure id=\"attachment_12054564\" class=\"wp-caption aligncenter\" style=\"max-width: 2000px\">\u003cimg loading=\"lazy\" decoding=\"async\" class=\"wp-image-12054564 size-full\" src=\"https://cdn.kqed.org/wp-content/uploads/sites/10/2025/09/Sam-Altman_chatpgt.jpg\" alt=\"\" width=\"2000\" height=\"1333\" srcset=\"https://cdn.kqed.org/wp-content/uploads/sites/10/2025/09/Sam-Altman_chatpgt.jpg 2000w, https://cdn.kqed.org/wp-content/uploads/sites/10/2025/09/Sam-Altman_chatpgt-160x107.jpg 160w, https://cdn.kqed.org/wp-content/uploads/sites/10/2025/09/Sam-Altman_chatpgt-1536x1024.jpg 1536w\" sizes=\"auto, (max-width: 2000px) 100vw, 2000px\">\u003cfigcaption class=\"wp-caption-text\">Samuel Altman, CEO of OpenAI, testifies before the Senate Judiciary Subcommittee on Privacy, Technology, and the Law May 16, 2023 in Washington, DC. \u003ccite>(Win McNamee/Getty Images)\u003c/cite>\u003c/figcaption>\u003c/figure>\n\u003cp>The bill was proposed by Attorney General Rob Bonta and Orinda Assemblymember Rebecca Bauer-Kahan, and is headed to Gov. Gavin Newsom’s desk for signing.\u003c/p>\n\u003cp>A second bill, AB1064, would ban AI chatbots from manipulating children into forming emotional attachments or harvesting their personal and biometric data.\u003c/p>\n\u003cp>State Sen. 
Josh Becker (D-Menlo Park) also introduced an AI bill to protect vulnerable users from chatbots’ harmful effects: \u003ca href=\"https://leginfo.legislature.ca.gov/faces/billNavClient.xhtml?bill_id=202520260SB243&firstNav=tracking\">SB 243\u003c/a> would require companion chatbots to frequently remind users that it isn’t a person, in order to reduce the risk of emotional manipulation or \u003ca href=\"https://www.kqed.org/news/12038154/kids-talking-ai-companion-chatbots-stanford-researchers-say-thats-bad-idea\">unhealthy attachment\u003c/a>.\u003c/p>\n\u003cp>Whether Newsom will support the bills, along with a flurry of other proposed AI-safety laws in Sacramento, remains to be seen. The governor told reporters in early August that he is trying to establish a middle ground that provides public safety guardrails without suppressing business: “We’ve led in AI innovation, and we’ve led in AI regulation, but we’re trying to find a balance.”\u003c/p>\n\u003cp>As Newsom eyes higher office, and the California governor’s race heats up, there’s been a surge in AI lobbying and political action committees from the industry, with a \u003ca href=\"https://www.wsj.com/politics/silicon-valley-launches-pro-ai-pacs-to-defend-industry-in-midterm-elections-287905b3?gaa_at=eafs&gaa_n=ASWzDAjaxxFIzEaiCnLuxtt5FYul1NMFgXzDPGeVaH0VKZedvoSLexjk_j2Gr_Q0ZKQ%3D&gaa_ts=68b063e0&gaa_sig=V93Si4VVkqKsN1H-aEXHbbUoyVrGdS9GECVqYESgBE7WTq_dVBNLHw5VIyH41lRNW0pQQRB3N7d0mV9v_EaR4Q%3D%3D\">report \u003c/a>last week from the \u003cem>Wall Street Journal\u003c/em> that Silicon Valley plans to pour $100 million into a network of organizations opposing AI regulation ahead of next year’s midterm elections.\u003c/p>\n\u003cp>But it may take more to convince Californians: seven in 10 state residents favor “strong laws to make AI fair” and believe voluntary rules “simply don’t go far enough,” according to recent\u003ca href=\"https://url.us.m.mimecastprotect.com/s/o-vjCADmygFVBPLwtGfgtGPCKp?domain=email.commoncause.org\"> polling by Tech Equity\u003c/a>. Meanwhile, 59% think “AI will most likely benefit the wealthiest households and corporations, not working people and the middle class.”\u003c/p>\n\u003cp>\u003cem>KQED’s \u003ca href=\"https://www.kqed.org/author/rachael-myrow\">Rachael Myrow\u003c/a> contributed to this report. \u003c/em>\u003c/p>\n\u003cp>\u003c/p>\n",
"blocks": [],
"excerpt": "Media safety advocates say minors shouldn’t be allowed to use artificial intelligence for companionship, and call for increased regulations in California. ",
"status": "publish",
"parent": 0,
"modified": 1757610610,
"stats": {
"hasAudio": false,
"hasVideo": false,
"hasChartOrMap": false,
"iframeSrcs": [],
"hasGoogleForm": false,
"hasGallery": false,
"hasHearkenModule": false,
"hasPolis": false,
"paragraphCount": 22,
"wordCount": 1021
},
"headData": {
"title": "Child Safety Groups Demand Mental Health Guardrails, After California Teen’s Suicide Using ChatGPT | KQED",
"description": "Media safety advocates say minors shouldn’t be allowed to use artificial intelligence for companionship, and call for increased regulations in California. ",
"ogTitle": "",
"ogDescription": "",
"ogImgId": "",
"twTitle": "",
"twDescription": "",
"twImgId": "",
"schema": {
"@context": "https://schema.org",
"@type": "NewsArticle",
"headline": "Child Safety Groups Demand Mental Health Guardrails, After California Teen’s Suicide Using ChatGPT",
"datePublished": "2025-09-04T04:00:11-07:00",
"dateModified": "2025-09-11T10:10:10-07:00",
"image": "https://cdn.kqed.org/wp-content/uploads/2020/02/KQED-OG-Image@1x.png",
"isAccessibleForFree": "True",
"publisher": {
"@type": "NewsMediaOrganization",
"@id": "https://www.kqed.org/#organization",
"name": "KQED",
"logo": "https://cdn.kqed.org/wp-content/uploads/2020/02/KQED-OG-Image@1x.png",
"url": "https://www.kqed.org",
"sameAs": [
"https://www.facebook.com/KQED",
"https://twitter.com/KQED",
"https://www.instagram.com/kqed/",
"https://www.tiktok.com/@kqedofficial",
"https://www.linkedin.com/company/kqed",
"https://www.youtube.com/channel/UCeC0IOo7i1P_61zVUWbJ4nw"
]
}
}
},
"primaryCategory": {
"termId": 248,
"slug": "technology",
"name": "Technology"
},
"audioUrl": "https://traffic.omny.fm/d/clips/0af137ef-751e-4b19-a055-aaef00d2d578/ffca7e9f-6831-41c5-bcaf-aaef00f5a073/ddeeed47-ed37-4e45-865e-b355010c5c6c/audio.mp3",
"sticky": false,
"nprStoryId": "kqed-12054490",
"templateType": "standard",
"featuredImageType": "standard",
"excludeFromSiteSearch": "Include",
"articleAge": "0",
"path": "/news/12054490/child-safety-groups-demand-mental-health-guardrails-after-california-teens-suicide-using-chatgpt",
"audioTrackLength": null,
"parsedContent": [
{
"type": "contentString",
"content": "\u003cdiv class=\"post-body\">\u003cp>\u003cp>\u003cem>If you or someone you know is struggling with thoughts of suicide, you can dial or text 988 and be connected to help.\u003cbr>\n\u003c/em>\u003cbr>\nWith its quick, often personable responses, \u003ca href=\"https://www.kqed.org/news/tag/chatgpt\">ChatGPT\u003c/a> can feel to some children more like an available friend than a language model engineered to guess its next word.\u003c/p>\n\u003cp>These blurred lines allow kids to go down “roads they should never go,” warn child safety advocates and tech policy groups, who have called for companies that design chatbots and artificial intelligence companions to take more responsibility for their program’s influence on youth.\u003c/p>\n\u003cp>This week, tech giant \u003ca href=\"https://openai.com/index/building-more-helpful-chatgpt-experiences-for-everyone/\">OpenAI\u003c/a> announced new safety measures for kids. The post didn’t mention 16-year-old Adam Raine, who, according to his parents, killed himself after discussing both his loneliness and plans to harm himself with ChatGPT.\u003c/p>\n\u003cp>\u003c/p>\u003c/div>",
"attributes": {
"named": {},
"numeric": []
}
},
{
"type": "component",
"content": "",
"name": "ad",
"attributes": {
"named": {
"label": "fullwidth"
},
"numeric": [
"fullwidth"
]
}
},
{
"type": "contentString",
"content": "\u003cdiv class=\"post-body\">\u003cp>\u003c/p>\n\u003cp>According to a lawsuit filed in San Francisco on Aug. 26, Maria and Matt Raine allege that ChatGPT-4o cultivated a psychological dependence in their son by continually encouraging and validating “whatever [he] expressed, including his most harmful and self-destructive thoughts.”\u003c/p>\n\u003cp>“This is an area that calls out for thoughtful common-sense regulation and guardrails. And quite frankly, that the leaders of all the major AI companies need to address,” said Jim Steyer, founder and CEO of Common Sense Media, which advocates safe media use for children.\u003c/p>\n\u003cp>With more than \u003ca href=\"https://mashable.com/article/openai-how-many-people-use-chatgpt\">500 million\u003c/a> weekly ChatGPT users and more than 2.5 billion prompts per day, users are increasingly turning to the large language model for \u003ca href=\"https://www.kqed.org/news/12049674/from-god-to-grief-people-are-asking-ai-the-big-questions-once-reserved-for-clergy\">emotional support.\u003c/a>\u003c/p>\n\u003cp>Both digital assistants like ChatGPT, as well as AI companions like Character.Ai and Replika, told researchers posing as 13-year-olds about drinking and drug use, instructed them on how to conceal eating disorders and even composed a suicide letter to their parents if asked, according to \u003ca href=\"https://www.kqed.org/news/12038154/kids-talking-ai-companion-chatbots-stanford-researchers-say-thats-bad-idea\">research from Stanford University\u003c/a>.\u003c/p>\u003c/div>",
"attributes": {
"named": {},
"numeric": []
}
},
{
"type": "component",
"content": "",
"name": "aside",
"attributes": {
"named": {
"postid": "news_12053799",
"hero": "https://cdn.kqed.org/wp-content/uploads/sites/10/2024/10/241009-OAKLAND-YOUTH-VOTE-MD-08-KQED-1020x680.jpg",
"label": ""
},
"numeric": []
}
},
{
"type": "contentString",
"content": "\u003cdiv class=\"post-body\">\u003cp>Steyer said OpenAI has partnered with Common Sense Media and has taken the issue more seriously than Meta AI or X’s Grok. But he still recommended that young people under 18 — “AI natives” — be restricted from using chatbots for companionship or therapy, suggesting that enhanced controls may not go far enough.\u003c/p>\n\u003cp>“You can’t just think that parental controls are a be-all end-all solution. They’re hard to use, very easy to bypass for young people, and they put the burden on parents when, honestly, it should be on the tech companies to prevent these kinds of tragic situations,” Steyer said. “It’s more like a bandaid when what we need is a long-term cure.”\u003c/p>\n\u003cp>In a blog post on Tuesday, the company shared plans to make the chatbot safer for young people to use in recognition of the fact that “people turn to it in the most difficult of moments.” The changes are set to roll out within the next month, OpenAI said.\u003c/p>\n\u003cp>OpenAI did not immediately respond to a request for comment. But the planned updates promise to link parents’ and teens’ accounts, reroute sensitive conversations with youth and alert parents “when the system detects their teen is in a moment of acute distress.”\u003c/p>\n\u003cp>If a user expresses suicidal ideation, ChatGPT is trained to direct people to seek professional help, OpenAI stated in a\u003ca href=\"https://openai.com/index/helping-people-when-they-need-it-most/\"> post\u003c/a> last week. ChatGPT refers people to 988, the suicide and crisis hotline.\u003c/p>\n\u003cp>The program does not escalate reports of self-harm to law enforcement, “given the uniquely private nature of ChatGPT interactions.” Licensed psychotherapists aren’t universally mandated to report self-harm either, but they must intervene if the client is at immediate risk.\u003c/p>\n\u003cp>Common Sense Media is supporting legislation in California that would establish limits protecting children from AI and social media abuse. AB 56 would implement \u003ca href=\"https://www.kqed.org/news/12017249/california-bill-would-put-tobacco-like-warnings-social-media-apps\">social media warning labels \u003c/a>that clearly state the risks to children, not unlike the labels pasted on tobacco products.\u003c/p>\n\u003cfigure id=\"attachment_12054564\" class=\"wp-caption aligncenter\" style=\"max-width: 2000px\">\u003cimg loading=\"lazy\" decoding=\"async\" class=\"wp-image-12054564 size-full\" src=\"https://cdn.kqed.org/wp-content/uploads/sites/10/2025/09/Sam-Altman_chatpgt.jpg\" alt=\"\" width=\"2000\" height=\"1333\" srcset=\"https://cdn.kqed.org/wp-content/uploads/sites/10/2025/09/Sam-Altman_chatpgt.jpg 2000w, https://cdn.kqed.org/wp-content/uploads/sites/10/2025/09/Sam-Altman_chatpgt-160x107.jpg 160w, https://cdn.kqed.org/wp-content/uploads/sites/10/2025/09/Sam-Altman_chatpgt-1536x1024.jpg 1536w\" sizes=\"auto, (max-width: 2000px) 100vw, 2000px\">\u003cfigcaption class=\"wp-caption-text\">Samuel Altman, CEO of OpenAI, testifies before the Senate Judiciary Subcommittee on Privacy, Technology, and the Law May 16, 2023 in Washington, DC. \u003ccite>(Win McNamee/Getty Images)\u003c/cite>\u003c/figcaption>\u003c/figure>\n\u003cp>The bill was proposed by Attorney General Rob Bonta and Orinda Assemblymember Rebecca Bauer-Kahan, and is headed to Gov. 
Gavin Newsom’s desk for signing.\u003c/p>\n\u003cp>A second bill, AB1064, would ban AI chatbots from manipulating children into forming emotional attachments or harvesting their personal and biometric data.\u003c/p>\n\u003cp>State Sen. Josh Becker (D-Menlo Park) also introduced an AI bill to protect vulnerable users from chatbots’ harmful effects: \u003ca href=\"https://leginfo.legislature.ca.gov/faces/billNavClient.xhtml?bill_id=202520260SB243&firstNav=tracking\">SB 243\u003c/a> would require companion chatbots to frequently remind users that it isn’t a person, in order to reduce the risk of emotional manipulation or \u003ca href=\"https://www.kqed.org/news/12038154/kids-talking-ai-companion-chatbots-stanford-researchers-say-thats-bad-idea\">unhealthy attachment\u003c/a>.\u003c/p>\n\u003cp>Whether Newsom will support the bills, along with a flurry of other proposed AI-safety laws in Sacramento, remains to be seen. The governor told reporters in early August that he is trying to establish a middle ground that provides public safety guardrails without suppressing business: “We’ve led in AI innovation, and we’ve led in AI regulation, but we’re trying to find a balance.”\u003c/p>\n\u003cp>As Newsom eyes higher office, and the California governor’s race heats up, there’s been a surge in AI lobbying and political action committees from the industry, with a \u003ca href=\"https://www.wsj.com/politics/silicon-valley-launches-pro-ai-pacs-to-defend-industry-in-midterm-elections-287905b3?gaa_at=eafs&gaa_n=ASWzDAjaxxFIzEaiCnLuxtt5FYul1NMFgXzDPGeVaH0VKZedvoSLexjk_j2Gr_Q0ZKQ%3D&gaa_ts=68b063e0&gaa_sig=V93Si4VVkqKsN1H-aEXHbbUoyVrGdS9GECVqYESgBE7WTq_dVBNLHw5VIyH41lRNW0pQQRB3N7d0mV9v_EaR4Q%3D%3D\">report \u003c/a>last week from the \u003cem>Wall Street Journal\u003c/em> that Silicon Valley plans to pour $100 million into a network of organizations opposing AI regulation ahead of next year’s midterm elections.\u003c/p>\n\u003cp>But it may take more to convince Californians: seven in 10 state residents favor “strong laws to make AI fair” and believe voluntary rules “simply don’t go far enough,” according to recent\u003ca href=\"https://url.us.m.mimecastprotect.com/s/o-vjCADmygFVBPLwtGfgtGPCKp?domain=email.commoncause.org\"> polling by Tech Equity\u003c/a>. Meanwhile, 59% think “AI will most likely benefit the wealthiest households and corporations, not working people and the middle class.”\u003c/p>\n\u003cp>\u003cem>KQED’s \u003ca href=\"https://www.kqed.org/author/rachael-myrow\">Rachael Myrow\u003c/a> contributed to this report. \u003c/em>\u003c/p>\n\u003cp>\u003c/p>\n\u003c/div>\u003c/p>",
"attributes": {
"named": {},
"numeric": []
}
}
],
"link": "/news/12054490/child-safety-groups-demand-mental-health-guardrails-after-california-teens-suicide-using-chatgpt",
"authors": [
"11925"
],
"categories": [
"news_31795",
"news_8",
"news_248"
],
"tags": [
"news_25184",
"news_32664",
"news_32668",
"news_27626",
"news_33542",
"news_689",
"news_38",
"news_34586",
"news_1631"
],
"featImg": "news_11998856",
"label": "news"
},
"news_12052617": {
"type": "posts",
"id": "news_12052617",
"meta": {
"index": "posts_1716263798",
"site": "news",
"id": "12052617",
"score": null,
"sort": [
1755612009000
]
},
"guestAuthors": [],
"slug": "newsoms-tightrope-walk-between-ai-regulation-and-silicon-valley-cash",
"title": "Newsom’s Tightrope Walk Between AI Regulation and Silicon Valley Cash",
"publishDate": 1755612009,
"format": "standard",
"headTitle": "Newsom’s Tightrope Walk Between AI Regulation and Silicon Valley Cash | KQED",
"labelTerm": {
"site": "news"
},
"content": "\u003cp>As Gov. Gavin Newsom eyes a \u003ca href=\"https://www.kqed.org/news/12043766/newsom-tries-to-find-political-footing-in-clash-with-trump\">potential run for the White House\u003c/a>, he faces a political challenge on the homefront. Roughly 30 AI-related bills are moving through the state Legislature in the last weeks of this legislative session, and it’s estimated a dozen or so will land on Newsom’s desk.\u003c/p>\n\u003cp>Does he veto all or most of them to mollify Silicon Valley donors? Or does he defy President Donald Trump’s industry-friendly light touch and model a tougher state stance on regulation?\u003c/p>\n\u003cp>At an Aug. 7 event announcing \u003ca href=\"https://www.kqed.org/news/12051433/california-teams-with-google-microsoft-ibm-adobe-to-prepare-students-for-ai-era\">AI training partnerships\u003c/a> with Adobe, Google, IBM and Microsoft, Newsom told reporters he is trying to establish a middle ground that provides guardrails for public safety without squelching innovation: “We’ve led in AI innovation, and we’ve led in AI regulation, but we’re trying to find a balance.”\u003c/p>\n\u003cp>[ad fullwidth]\u003c/p>\n\u003cp>Newsom noted that, even though he vetoed the most \u003ca href=\"https://www.kqed.org/news/12000892/ai-safety-testing-bill-heads-for-a-floor-vote-in-sacramento-taking-heavy-fire-from-silicon-valley\">controversial bill\u003c/a> of the last legislative session, he ultimately signed 18 AI-related bills into law, addressing everything from training data transparency to deepfakes.\u003c/p>\n\u003cp>Industry voices, chief among them OpenAI’s Chief Global Affairs Officer Chris Lehane, continue to lobby against binding regulation at all levels of government. Lehane often appears to be speaking to California in his public posts.\u003c/p>\n\u003cfigure id=\"attachment_11802216\" class=\"wp-caption aligncenter\" style=\"max-width: 1920px\">\u003cimg loading=\"lazy\" decoding=\"async\" class=\"size-full wp-image-11802216\" src=\"https://cdn.kqed.org/wp-content/uploads/sites/10/2020/02/RS41373_IMG_0396-qut.jpg\" alt=\"Assemblymember Rebecca Bauer-Kahan says local jurisdictions need the power to stop a wildfire disaster before it starts. The assemblymember and other state lawmakers announced a bill to expand enforcement actions against PG&E and other utilities on February, 18, 2020.\" width=\"1920\" height=\"1440\" srcset=\"https://cdn.kqed.org/wp-content/uploads/sites/10/2020/02/RS41373_IMG_0396-qut.jpg 1920w, https://cdn.kqed.org/wp-content/uploads/sites/10/2020/02/RS41373_IMG_0396-qut-160x120.jpg 160w, https://cdn.kqed.org/wp-content/uploads/sites/10/2020/02/RS41373_IMG_0396-qut-800x600.jpg 800w, https://cdn.kqed.org/wp-content/uploads/sites/10/2020/02/RS41373_IMG_0396-qut-1020x765.jpg 1020w, https://cdn.kqed.org/wp-content/uploads/sites/10/2020/02/RS41373_IMG_0396-qut-1832x1374.jpg 1832w, https://cdn.kqed.org/wp-content/uploads/sites/10/2020/02/RS41373_IMG_0396-qut-1376x1032.jpg 1376w, https://cdn.kqed.org/wp-content/uploads/sites/10/2020/02/RS41373_IMG_0396-qut-1044x783.jpg 1044w, https://cdn.kqed.org/wp-content/uploads/sites/10/2020/02/RS41373_IMG_0396-qut-632x474.jpg 632w, https://cdn.kqed.org/wp-content/uploads/sites/10/2020/02/RS41373_IMG_0396-qut-536x402.jpg 536w\" sizes=\"auto, (max-width: 1920px) 100vw, 1920px\">\u003cfigcaption class=\"wp-caption-text\">Assemblymember Rebecca Bauer-Kahan on Feb. 18, 2020. 
\u003ccite>(Eli Walsh/Bay City News)\u003c/cite>\u003c/figcaption>\u003c/figure>\n\u003cp>“Imagine how hard it would have been for the US to win the Space Race if California’s aerospace and tech industries got tangled up in state-by-state regulations impeding the innovation of transistor technology,” Lehane wrote pointedly on \u003ca href=\"https://www.linkedin.com/feed/update/urn:li:activity:7361704760334274560/\">LinkedIn\u003c/a> last week.\u003c/p>\n\u003cp>But if Silicon Valley lobbyists see the industry in a “space race” against foreign adversaries, it’s not clear whether the general public shares that sense of urgency for technological advancement at any cost. An \u003ca href=\"https://www.axios.com/newsletters/axios-ai-plus-62025700-399b-11f0-b37f-b73dfdd12f1d\">Axios/Harris poll\u003c/a> released in early June 2025 showed strong majorities of Americans across all age groups want companies to take AI development slowly to “get it right the first time, even if that delays breakthroughs.”\u003c/p>\n\u003cp>Signing some of the AI bills that make it to his desk could also provide Newsom with an opportunity to stick a thumb in the eye of Trump. The challenge for Newsom is choosing which of the roughly 30 bills making their way through Sacramento are least likely to upset his supporters in Silicon Valley.\u003c/p>\n\u003cp>Assemblymember \u003ca href=\"https://www.kqed.org/news/11998817/ai-regulation-still-in-the-mix-as-california-legislature-returns-to-session\">Rebecca Bauer-Kahan, \u003c/a>D-Orinda, could be considered the foremost advocate for AI regulation this legislative session, having authored six still in play:\u003c/p>\n\u003cul>\n\u003cli>\u003cstrong>AB 222\u003c/strong> — Would require data centers that power AI to disclose how much water and electricity they use, so the public can see their environmental footprint.\u003c/li>\n\u003cli>\u003cstrong>AB 412\u003c/strong> — Would require AI developers to disclose when copyrighted works were used to train their models and give rights holders a way to check and challenge that use.\u003c/li>\n\u003cli>\u003cstrong>AB 621\u003c/strong> — Would let victims, including minors, sue creators and facilitators of non-consensual deepfake sexual material and increase the damages they can collect.\u003c/li>\n\u003cli>\u003cstrong>AB 1018\u003c/strong> — Would regulate how AI-driven decision systems are developed, tested, disclosed, audited, and appealed when used to make consequential decisions about people.\u003c/li>\n\u003cli>\u003cstrong>AB 1064\u003c/strong> — Would ban AI chatbots from manipulating children into forming emotional attachments or harvesting their personal and biometric data.\u003c/li>\n\u003cli>\u003cstrong>AB 1405\u003c/strong> — Would require AI auditors to enroll with the state, follow conflict-of-interest and reporting rules, and create a system for the public to report auditor misconduct starting in 2027.\u003c/li>\n\u003c/ul>\n\u003cp>When pressed to pick a personal favorite on the list, Bauer-Kahan chose \u003ca href=\"https://leginfo.legislature.ca.gov/faces/billTextClient.xhtml?bill_id=202520260AB1064\">AB 1064\u003c/a>, the Leading Ethical AI Development for Kids Act.\u003c/p>\n\u003cp>“I have teenage children. You know, I live it every day,” Bauer-Kahan said. “We have to step in and make sure these companies are doing right by our children.\u003c/p>\n\u003cp>“I’m driven mostly as a mother, and I think that Gov. Newsom is driven as a father,” she said. 
“You know, his children are going to grow up in the AI age, as are mine. And I think he wants a safe environment, and I’m hopeful that will lead him to find balance between what industry needs and what the public needs.”\u003c/p>\n\u003cfigure id=\"attachment_12005278\" class=\"wp-caption aligncenter\" style=\"max-width: 2121px\">\u003cimg loading=\"lazy\" decoding=\"async\" class=\"size-full wp-image-12005278\" src=\"https://cdn.kqed.org/wp-content/uploads/sites/10/2024/09/GettyImages-1460098524.jpg\" alt=\"Close-up female hands with a blue manicure using pink smartphone outdoors.\" width=\"2121\" height=\"1414\" srcset=\"https://cdn.kqed.org/wp-content/uploads/sites/10/2024/09/GettyImages-1460098524.jpg 2121w, https://cdn.kqed.org/wp-content/uploads/sites/10/2024/09/GettyImages-1460098524-800x533.jpg 800w, https://cdn.kqed.org/wp-content/uploads/sites/10/2024/09/GettyImages-1460098524-1020x680.jpg 1020w, https://cdn.kqed.org/wp-content/uploads/sites/10/2024/09/GettyImages-1460098524-160x107.jpg 160w, https://cdn.kqed.org/wp-content/uploads/sites/10/2024/09/GettyImages-1460098524-1536x1024.jpg 1536w, https://cdn.kqed.org/wp-content/uploads/sites/10/2024/09/GettyImages-1460098524-2048x1365.jpg 2048w, https://cdn.kqed.org/wp-content/uploads/sites/10/2024/09/GettyImages-1460098524-1920x1280.jpg 1920w\" sizes=\"auto, (max-width: 2121px) 100vw, 2121px\">\u003cfigcaption class=\"wp-caption-text\">Research shows that about 70% of teens use at least one kind of AI tool. \u003ccite>(Tatiana Meteleva/Getty Images)\u003c/cite>\u003c/figcaption>\u003c/figure>\n\u003cp>Bauer-Kahan said the long game for AI legislation requires a careful effort to define terms so it’s not easy for companies to sidestep the mandate, and to protect mechanisms of enforcement so the law has teeth. For instance, if a child suffers actual harm as a result of the use of a covered product, AB 1064 allows that child, or a parent or guardian, to sue.\u003c/p>\n\u003cp>But industry critics say the bill is misguided and full of issues that could lead to unintended consequences.\u003c/p>\n\u003cp>“AB 1064’s definitions are drafted so broadly that they could unintentionally capture almost all chatbot tools, even basic customer service functions, and would require invasive age-verification to maintain functionality … It risks limiting minors’ access to lawful and beneficial AI tools — raising significant First Amendment concerns,” Robert Boykin, executive director of California and the Southwest for the trade group TechNet, wrote in an email to KQED.\u003c/p>\n\u003cp>The bill could also spur costly litigation and create substantial regulatory uncertainty, Boykin said.[aside postID=news_12051433 hero='https://cdn.kqed.org/wp-content/uploads/sites/10/2025/08/GavinNewsomAISF1.jpg']But a bill that provides no capacity for individuals to sue is a bill without teeth, according to many who watch Sacramento politics. 
That is to say, the State Attorney General’s Office has limited budget, people and attention to carry the entire burden of enforcement on its proverbial shoulders, especially when it’s busy pursuing \u003ca href=\"https://oag.ca.gov/news/press-releases/six-months-second-trump-administration-attorney-general-bonta-reports-california\">more than 37 lawsuits\u003c/a> against the second Trump administration.\u003c/p>\n\u003cp>After congressional Republicans rejected a push to ban states from regulating artificial intelligence for a decade in the One Big Beautiful Bill Act, Trump appears to have signaled his intent to roll out a backdoor ban with his \u003ca href=\"https://www.whitehouse.gov/wp-content/uploads/2025/07/Americas-AI-Action-Plan.pdf\">AI Action Plan\u003c/a>. The executive orders include language like, “AI is far too important to smother in bureaucracy at this early stage, whether at the state or Federal level.”\u003c/p>\n\u003cp>The implied threat: leave off AI regulation or federal funding could be reduced.\u003c/p>\n\u003cp>“It seemed to be using money as a cudgel, but I’m not sure that will be effective here, or that’s something that really, would bother us, or prevent us in California from moving forward,” state Sen. Josh Becker, D-Menlo Park, said.\u003c/p>\n\u003cp>Becker has introduced two AI bills this session. \u003ca href=\"https://leginfo.legislature.ca.gov/faces/billNavClient.xhtml?bill_id=202520260SB243&firstNav=tracking\">SB 243\u003c/a> would require companion chatbots to frequently remind users that it isn’t a person, in order to reduce the risk of emotional manipulation or \u003ca href=\"https://www.kqed.org/news/12038154/kids-talking-ai-companion-chatbots-stanford-researchers-say-thats-bad-idea\">unhealthy attachment\u003c/a>. \u003ca href=\"https://leginfo.legislature.ca.gov/faces/billNavClient.xhtml?bill_id=202520260SB468&firstNav=tracking\">SB 468\u003c/a> requires AI developers to design and disclose their own security measures to protect personal data, subject to state oversight.\u003c/p>\n\u003cp>Becker, a former businessman, said he works with industry lobbyists to hash out language companies consider feasible, if not unobjectionable. In the case of SB 243, Boykin of TechNet acknowledged Becker’s constructive engagement, adding, “Our goal has always been to narrow some of the sweeping provisions — particularly around the definition of companion chatbots, reporting obligations, and costly third-party audits.”\u003c/p>\n\u003cp>But Becker said California lawmakers are also well practiced in battling Silicon Valley lobbyists intent on killing or neutering legislation. “Even before Trump, there [was] a bipartisan effort to preempt our privacy laws here in California. So that’s gonna be a constant fight,” Becker said.\u003c/p>\n\u003cp>He added that he speaks with people who work in Silicon Valley who tell him they want regulation.\u003c/p>\n\u003cp>“My anecdotal experience is that there’s much more conversation by the people who work at these companies offline, about the potential impact of these technologies, than you’ll hear out of the communications people,” Becker said.\u003c/p>\n\u003cp>[ad floatright]\u003c/p>\n",
"blocks": [],
"excerpt": "President Donald Trump’s calls for a light regulatory touch on AI don’t appear to be intimidating California lawmakers as they push a flurry of bills. ",
"status": "publish",
"parent": 0,
"modified": 1755792055,
"stats": {
"hasAudio": false,
"hasVideo": false,
"hasChartOrMap": false,
"iframeSrcs": [],
"hasGoogleForm": false,
"hasGallery": false,
"hasHearkenModule": false,
"hasPolis": false,
"paragraphCount": 26,
"wordCount": 1363
},
"headData": {
"title": "Newsom’s Tightrope Walk Between AI Regulation and Silicon Valley Cash | KQED",
"description": "President Donald Trump’s calls for a light regulatory touch on AI don’t appear to be intimidating California lawmakers as they push a flurry of bills. ",
"ogTitle": "",
"ogDescription": "",
"ogImgId": "",
"twTitle": "",
"twDescription": "",
"twImgId": "",
"schema": {
"@context": "https://schema.org",
"@type": "NewsArticle",
"headline": "Newsom’s Tightrope Walk Between AI Regulation and Silicon Valley Cash",
"datePublished": "2025-08-19T07:00:09-07:00",
"dateModified": "2025-08-21T09:00:55-07:00",
"image": "https://cdn.kqed.org/wp-content/uploads/2020/02/KQED-OG-Image@1x.png",
"isAccessibleForFree": "True",
"publisher": {
"@type": "NewsMediaOrganization",
"@id": "https://www.kqed.org/#organization",
"name": "KQED",
"logo": "https://cdn.kqed.org/wp-content/uploads/2020/02/KQED-OG-Image@1x.png",
"url": "https://www.kqed.org",
"sameAs": [
"https://www.facebook.com/KQED",
"https://twitter.com/KQED",
"https://www.instagram.com/kqed/",
"https://www.tiktok.com/@kqedofficial",
"https://www.linkedin.com/company/kqed",
"https://www.youtube.com/channel/UCeC0IOo7i1P_61zVUWbJ4nw"
]
}
}
},
"primaryCategory": {
"termId": 8,
"slug": "news",
"name": "News"
},
"audioUrl": "https://traffic.omny.fm/d/clips/0af137ef-751e-4b19-a055-aaef00d2d578/ffca7e9f-6831-41c5-bcaf-aaef00f5a073/1ed81c39-73f0-4cd1-9dc9-b33e0113e8fd/audio.mp3",
"sticky": false,
"nprStoryId": "kqed-12052617",
"templateType": "standard",
"featuredImageType": "standard",
"excludeFromSiteSearch": "Include",
"articleAge": "0",
"path": "/news/12052617/newsoms-tightrope-walk-between-ai-regulation-and-silicon-valley-cash",
"audioTrackLength": null,
"parsedContent": [
{
"type": "contentString",
"content": "\u003cdiv class=\"post-body\">\u003cp>\u003cp>As Gov. Gavin Newsom eyes a \u003ca href=\"https://www.kqed.org/news/12043766/newsom-tries-to-find-political-footing-in-clash-with-trump\">potential run for the White House\u003c/a>, he faces a political challenge on the homefront. Roughly 30 AI-related bills are moving through the state Legislature in the last weeks of this legislative session, and it’s estimated a dozen or so will land on Newsom’s desk.\u003c/p>\n\u003cp>Does he veto all or most of them to mollify Silicon Valley donors? Or does he defy President Donald Trump’s industry-friendly light touch and model a tougher state stance on regulation?\u003c/p>\n\u003cp>At an Aug. 7 event announcing \u003ca href=\"https://www.kqed.org/news/12051433/california-teams-with-google-microsoft-ibm-adobe-to-prepare-students-for-ai-era\">AI training partnerships\u003c/a> with Adobe, Google, IBM and Microsoft, Newsom told reporters he is trying to establish a middle ground that provides guardrails for public safety without squelching innovation: “We’ve led in AI innovation, and we’ve led in AI regulation, but we’re trying to find a balance.”\u003c/p>\n\u003cp>\u003c/p>\u003c/div>",
"attributes": {
"named": {},
"numeric": []
}
},
{
"type": "component",
"content": "",
"name": "ad",
"attributes": {
"named": {
"label": "fullwidth"
},
"numeric": [
"fullwidth"
]
}
},
{
"type": "contentString",
"content": "\u003cdiv class=\"post-body\">\u003cp>\u003c/p>\n\u003cp>Newsom noted that, even though he vetoed the most \u003ca href=\"https://www.kqed.org/news/12000892/ai-safety-testing-bill-heads-for-a-floor-vote-in-sacramento-taking-heavy-fire-from-silicon-valley\">controversial bill\u003c/a> of the last legislative session, he ultimately signed 18 AI-related bills into law, addressing everything from training data transparency to deepfakes.\u003c/p>\n\u003cp>Industry voices, chief among them OpenAI’s Chief Global Affairs Officer Chris Lehane, continue to lobby against binding regulation at all levels of government. Lehane often appears to be speaking to California in his public posts.\u003c/p>\n\u003cfigure id=\"attachment_11802216\" class=\"wp-caption aligncenter\" style=\"max-width: 1920px\">\u003cimg loading=\"lazy\" decoding=\"async\" class=\"size-full wp-image-11802216\" src=\"https://cdn.kqed.org/wp-content/uploads/sites/10/2020/02/RS41373_IMG_0396-qut.jpg\" alt=\"Assemblymember Rebecca Bauer-Kahan says local jurisdictions need the power to stop a wildfire disaster before it starts. The assemblymember and other state lawmakers announced a bill to expand enforcement actions against PG&E and other utilities on February, 18, 2020.\" width=\"1920\" height=\"1440\" srcset=\"https://cdn.kqed.org/wp-content/uploads/sites/10/2020/02/RS41373_IMG_0396-qut.jpg 1920w, https://cdn.kqed.org/wp-content/uploads/sites/10/2020/02/RS41373_IMG_0396-qut-160x120.jpg 160w, https://cdn.kqed.org/wp-content/uploads/sites/10/2020/02/RS41373_IMG_0396-qut-800x600.jpg 800w, https://cdn.kqed.org/wp-content/uploads/sites/10/2020/02/RS41373_IMG_0396-qut-1020x765.jpg 1020w, https://cdn.kqed.org/wp-content/uploads/sites/10/2020/02/RS41373_IMG_0396-qut-1832x1374.jpg 1832w, https://cdn.kqed.org/wp-content/uploads/sites/10/2020/02/RS41373_IMG_0396-qut-1376x1032.jpg 1376w, https://cdn.kqed.org/wp-content/uploads/sites/10/2020/02/RS41373_IMG_0396-qut-1044x783.jpg 1044w, https://cdn.kqed.org/wp-content/uploads/sites/10/2020/02/RS41373_IMG_0396-qut-632x474.jpg 632w, https://cdn.kqed.org/wp-content/uploads/sites/10/2020/02/RS41373_IMG_0396-qut-536x402.jpg 536w\" sizes=\"auto, (max-width: 1920px) 100vw, 1920px\">\u003cfigcaption class=\"wp-caption-text\">Assemblymember Rebecca Bauer-Kahan on Feb. 18, 2020. \u003ccite>(Eli Walsh/Bay City News)\u003c/cite>\u003c/figcaption>\u003c/figure>\n\u003cp>“Imagine how hard it would have been for the US to win the Space Race if California’s aerospace and tech industries got tangled up in state-by-state regulations impeding the innovation of transistor technology,” Lehane wrote pointedly on \u003ca href=\"https://www.linkedin.com/feed/update/urn:li:activity:7361704760334274560/\">LinkedIn\u003c/a> last week.\u003c/p>\n\u003cp>But if Silicon Valley lobbyists see the industry in a “space race” against foreign adversaries, it’s not clear whether the general public shares that sense of urgency for technological advancement at any cost. An \u003ca href=\"https://www.axios.com/newsletters/axios-ai-plus-62025700-399b-11f0-b37f-b73dfdd12f1d\">Axios/Harris poll\u003c/a> released in early June 2025 showed strong majorities of Americans across all age groups want companies to take AI development slowly to “get it right the first time, even if that delays breakthroughs.”\u003c/p>\n\u003cp>Signing some of the AI bills that make it to his desk could also provide Newsom with an opportunity to stick a thumb in the eye of Trump. 
The challenge for Newsom is choosing which of the roughly 30 bills making their way through Sacramento are least likely to upset his supporters in Silicon Valley.\u003c/p>\n\u003cp>Assemblymember \u003ca href=\"https://www.kqed.org/news/11998817/ai-regulation-still-in-the-mix-as-california-legislature-returns-to-session\">Rebecca Bauer-Kahan, \u003c/a>D-Orinda, could be considered the foremost advocate for AI regulation this legislative session, having authored six still in play:\u003c/p>\n\u003cul>\n\u003cli>\u003cstrong>AB 222\u003c/strong> — Would require data centers that power AI to disclose how much water and electricity they use, so the public can see their environmental footprint.\u003c/li>\n\u003cli>\u003cstrong>AB 412\u003c/strong> — Would require AI developers to disclose when copyrighted works were used to train their models and give rights holders a way to check and challenge that use.\u003c/li>\n\u003cli>\u003cstrong>AB 621\u003c/strong> — Would let victims, including minors, sue creators and facilitators of non-consensual deepfake sexual material and increase the damages they can collect.\u003c/li>\n\u003cli>\u003cstrong>AB 1018\u003c/strong> — Would regulate how AI-driven decision systems are developed, tested, disclosed, audited, and appealed when used to make consequential decisions about people.\u003c/li>\n\u003cli>\u003cstrong>AB 1064\u003c/strong> — Would ban AI chatbots from manipulating children into forming emotional attachments or harvesting their personal and biometric data.\u003c/li>\n\u003cli>\u003cstrong>AB 1405\u003c/strong> — Would require AI auditors to enroll with the state, follow conflict-of-interest and reporting rules, and create a system for the public to report auditor misconduct starting in 2027.\u003c/li>\n\u003c/ul>\n\u003cp>When pressed to pick a personal favorite on the list, Bauer-Kahan chose \u003ca href=\"https://leginfo.legislature.ca.gov/faces/billTextClient.xhtml?bill_id=202520260AB1064\">AB 1064\u003c/a>, the Leading Ethical AI Development for Kids Act.\u003c/p>\n\u003cp>“I have teenage children. You know, I live it every day,” Bauer-Kahan said. “We have to step in and make sure these companies are doing right by our children.\u003c/p>\n\u003cp>“I’m driven mostly as a mother, and I think that Gov. Newsom is driven as a father,” she said. “You know, his children are going to grow up in the AI age, as are mine. 
And I think he wants a safe environment, and I’m hopeful that will lead him to find balance between what industry needs and what the public needs.”\u003c/p>\n\u003cfigure id=\"attachment_12005278\" class=\"wp-caption aligncenter\" style=\"max-width: 2121px\">\u003cimg loading=\"lazy\" decoding=\"async\" class=\"size-full wp-image-12005278\" src=\"https://cdn.kqed.org/wp-content/uploads/sites/10/2024/09/GettyImages-1460098524.jpg\" alt=\"Close-up female hands with a blue manicure using pink smartphone outdoors.\" width=\"2121\" height=\"1414\" srcset=\"https://cdn.kqed.org/wp-content/uploads/sites/10/2024/09/GettyImages-1460098524.jpg 2121w, https://cdn.kqed.org/wp-content/uploads/sites/10/2024/09/GettyImages-1460098524-800x533.jpg 800w, https://cdn.kqed.org/wp-content/uploads/sites/10/2024/09/GettyImages-1460098524-1020x680.jpg 1020w, https://cdn.kqed.org/wp-content/uploads/sites/10/2024/09/GettyImages-1460098524-160x107.jpg 160w, https://cdn.kqed.org/wp-content/uploads/sites/10/2024/09/GettyImages-1460098524-1536x1024.jpg 1536w, https://cdn.kqed.org/wp-content/uploads/sites/10/2024/09/GettyImages-1460098524-2048x1365.jpg 2048w, https://cdn.kqed.org/wp-content/uploads/sites/10/2024/09/GettyImages-1460098524-1920x1280.jpg 1920w\" sizes=\"auto, (max-width: 2121px) 100vw, 2121px\">\u003cfigcaption class=\"wp-caption-text\">Research shows that about 70% of teens use at least one kind of AI tool. \u003ccite>(Tatiana Meteleva/Getty Images)\u003c/cite>\u003c/figcaption>\u003c/figure>\n\u003cp>Bauer-Kahan said the long game for AI legislation requires a careful effort to define terms so it’s not easy for companies to sidestep the mandate, and to protect mechanisms of enforcement so the law has teeth. For instance, if a child suffers actual harm as a result of the use of a covered product, AB 1064 allows that child, or a parent or guardian, to sue.\u003c/p>\n\u003cp>But industry critics say the bill is misguided and full of issues that could lead to unintended consequences.\u003c/p>\n\u003cp>“AB 1064’s definitions are drafted so broadly that they could unintentionally capture almost all chatbot tools, even basic customer service functions, and would require invasive age-verification to maintain functionality … It risks limiting minors’ access to lawful and beneficial AI tools — raising significant First Amendment concerns,” Robert Boykin, executive director of California and the Southwest for the trade group TechNet, wrote in an email to KQED.\u003c/p>\n\u003cp>The bill could also spur costly litigation and create substantial regulatory uncertainty, Boykin said.\u003c/p>\u003c/div>",
"attributes": {
"named": {},
"numeric": []
}
},
{
"type": "component",
"content": "",
"name": "aside",
"attributes": {
"named": {
"postid": "news_12051433",
"hero": "https://cdn.kqed.org/wp-content/uploads/sites/10/2025/08/GavinNewsomAISF1.jpg",
"label": ""
},
"numeric": []
}
},
{
"type": "contentString",
"content": "\u003cdiv class=\"post-body\">\u003cp>But a bill that provides no capacity for individuals to sue is a bill without teeth, according to many who watch Sacramento politics. That is to say, the State Attorney General’s Office has limited budget, people and attention to carry the entire burden of enforcement on its proverbial shoulders, especially when it’s busy pursuing \u003ca href=\"https://oag.ca.gov/news/press-releases/six-months-second-trump-administration-attorney-general-bonta-reports-california\">more than 37 lawsuits\u003c/a> against the second Trump administration.\u003c/p>\n\u003cp>After congressional Republicans rejected a push to ban states from regulating artificial intelligence for a decade in the One Big Beautiful Bill Act, Trump appears to have signaled his intent to roll out a backdoor ban with his \u003ca href=\"https://www.whitehouse.gov/wp-content/uploads/2025/07/Americas-AI-Action-Plan.pdf\">AI Action Plan\u003c/a>. The executive orders include language like, “AI is far too important to smother in bureaucracy at this early stage, whether at the state or Federal level.”\u003c/p>\n\u003cp>The implied threat: leave off AI regulation or federal funding could be reduced.\u003c/p>\n\u003cp>“It seemed to be using money as a cudgel, but I’m not sure that will be effective here, or that’s something that really, would bother us, or prevent us in California from moving forward,” state Sen. Josh Becker, D-Menlo Park, said.\u003c/p>\n\u003cp>Becker has introduced two AI bills this session. \u003ca href=\"https://leginfo.legislature.ca.gov/faces/billNavClient.xhtml?bill_id=202520260SB243&firstNav=tracking\">SB 243\u003c/a> would require companion chatbots to frequently remind users that it isn’t a person, in order to reduce the risk of emotional manipulation or \u003ca href=\"https://www.kqed.org/news/12038154/kids-talking-ai-companion-chatbots-stanford-researchers-say-thats-bad-idea\">unhealthy attachment\u003c/a>. \u003ca href=\"https://leginfo.legislature.ca.gov/faces/billNavClient.xhtml?bill_id=202520260SB468&firstNav=tracking\">SB 468\u003c/a> requires AI developers to design and disclose their own security measures to protect personal data, subject to state oversight.\u003c/p>\n\u003cp>Becker, a former businessman, said he works with industry lobbyists to hash out language companies consider feasible, if not unobjectionable. In the case of SB 243, Boykin of TechNet acknowledged Becker’s constructive engagement, adding, “Our goal has always been to narrow some of the sweeping provisions — particularly around the definition of companion chatbots, reporting obligations, and costly third-party audits.”\u003c/p>\n\u003cp>But Becker said California lawmakers are also well practiced in battling Silicon Valley lobbyists intent on killing or neutering legislation. “Even before Trump, there [was] a bipartisan effort to preempt our privacy laws here in California. So that’s gonna be a constant fight,” Becker said.\u003c/p>\n\u003cp>He added that he speaks with people who work in Silicon Valley who tell him they want regulation.\u003c/p>\n\u003cp>“My anecdotal experience is that there’s much more conversation by the people who work at these companies offline, about the potential impact of these technologies, than you’ll hear out of the communications people,” Becker said.\u003c/p>\n\u003cp>\u003c/p>\u003c/div>",
"attributes": {
"named": {},
"numeric": []
}
},
{
"type": "component",
"content": "",
"name": "ad",
"attributes": {
"named": {
"label": "floatright"
},
"numeric": [
"floatright"
]
}
},
{
"type": "contentString",
"content": "\u003cdiv class=\"post-body\">\u003cp>\u003c/p>\n\u003c/div>\u003c/p>",
"attributes": {
"named": {},
"numeric": []
}
}
],
"link": "/news/12052617/newsoms-tightrope-walk-between-ai-regulation-and-silicon-valley-cash",
"authors": [
"251"
],
"categories": [
"news_31795",
"news_28250",
"news_8",
"news_13",
"news_248"
],
"tags": [
"news_25184",
"news_32664",
"news_34755",
"news_18538",
"news_22307",
"news_32668",
"news_27626",
"news_34586",
"news_1631"
],
"featImg": "news_12052766",
"label": "news"
}
},
"programsReducer": {
"all-things-considered": {
"id": "all-things-considered",
"title": "All Things Considered",
"info": "Every weekday, \u003cem>All Things Considered\u003c/em> hosts Robert Siegel, Audie Cornish, Ari Shapiro, and Kelly McEvers present the program's trademark mix of news, interviews, commentaries, reviews, and offbeat features. Michel Martin hosts on the weekends.",
"airtime": "MON-FRI 1pm-2pm, 4:30pm-6:30pm\u003cbr />SAT-SUN 5pm-6pm",
"imageSrc": "https://cdn.kqed.org/wp-content/uploads/2024/04/All-Things-Considered-Podcast-Tile-360x360-1.jpg",
"officialWebsiteLink": "https://www.npr.org/programs/all-things-considered/",
"meta": {
"site": "news",
"source": "npr"
},
"link": "/radio/program/all-things-considered"
},
"american-suburb-podcast": {
"id": "american-suburb-podcast",
"title": "American Suburb: The Podcast",
"tagline": "The flip side of gentrification, told through one town",
"info": "Gentrification is changing cities across America, forcing people from neighborhoods they have long called home. Call them the displaced. Now those priced out of the Bay Area are looking for a better life in an unlikely place. American Suburb follows this migration to one California town along the Delta, 45 miles from San Francisco. But is this once sleepy suburb ready for them?",
"imageSrc": "https://cdn.kqed.org/wp-content/uploads/2024/04/American-Suburb-Podcast-Tile-703x703-1.jpg",
"officialWebsiteLink": "/news/series/american-suburb-podcast",
"meta": {
"site": "news",
"source": "kqed",
"order": 19
},
"link": "/news/series/american-suburb-podcast/",
"subscribe": {
"npr": "https://rpb3r.app.goo.gl/RBrW",
"apple": "https://itunes.apple.com/WebObjects/MZStore.woa/wa/viewPodcast?mt=2&id=1287748328",
"tuneIn": "https://tunein.com/radio/American-Suburb-p1086805/",
"rss": "https://ww2.kqed.org/news/series/american-suburb-podcast/feed/podcast",
"google": "https://podcasts.google.com/feed/aHR0cHM6Ly9mZWVkcy5tZWdhcGhvbmUuZm0vS1FJTkMzMDExODgxNjA5"
}
},
"baycurious": {
"id": "baycurious",
"title": "Bay Curious",
"tagline": "Exploring the Bay Area, one question at a time",
"info": "KQED’s new podcast, Bay Curious, gets to the bottom of the mysteries — both profound and peculiar — that give the Bay Area its unique identity. And we’ll do it with your help! You ask the questions. You decide what Bay Curious investigates. And you join us on the journey to find the answers.",
"imageSrc": "https://cdn.kqed.org/wp-content/uploads/2024/04/Bay-Curious-Podcast-Tile-703x703-1.jpg",
"imageAlt": "\"KQED Bay Curious",
"officialWebsiteLink": "/news/series/baycurious",
"meta": {
"site": "news",
"source": "kqed",
"order": 3
},
"link": "/podcasts/baycurious",
"subscribe": {
"apple": "https://podcasts.apple.com/us/podcast/bay-curious/id1172473406",
"npr": "https://www.npr.org/podcasts/500557090/bay-curious",
"rss": "https://ww2.kqed.org/news/category/bay-curious-podcast/feed/podcast",
"google": "https://podcasts.google.com/feed/aHR0cHM6Ly93dzIua3FlZC5vcmcvbmV3cy9jYXRlZ29yeS9iYXktY3VyaW91cy1wb2RjYXN0L2ZlZWQvcG9kY2FzdA",
"stitcher": "https://www.stitcher.com/podcast/kqed/bay-curious",
"spotify": "https://open.spotify.com/show/6O76IdmhixfijmhTZLIJ8k"
}
},
"bbc-world-service": {
"id": "bbc-world-service",
"title": "BBC World Service",
"info": "The day's top stories from BBC News compiled twice daily in the week, once at weekends.",
"airtime": "MON-FRI 9pm-10pm, TUE-FRI 1am-2am",
"imageSrc": "https://cdn.kqed.org/wp-content/uploads/2024/04/BBC-World-Service-Podcast-Tile-360x360-1.jpg",
"officialWebsiteLink": "https://www.bbc.co.uk/sounds/play/live:bbc_world_service",
"meta": {
"site": "news",
"source": "BBC World Service"
},
"link": "/radio/program/bbc-world-service",
"subscribe": {
"apple": "https://itunes.apple.com/us/podcast/global-news-podcast/id135067274?mt=2",
"tuneIn": "https://tunein.com/radio/BBC-World-Service-p455581/",
"rss": "https://podcasts.files.bbci.co.uk/p02nq0gn.rss"
}
},
"californiareport": {
"id": "californiareport",
"title": "The California Report",
"tagline": "California, day by day",
"info": "KQED’s statewide radio news program providing daily coverage of issues, trends and public policy decisions.",
"imageSrc": "https://cdn.kqed.org/wp-content/uploads/2024/04/The-California-Report-Podcast-Tile-703x703-1.jpg",
"imageAlt": "KQED The California Report",
"officialWebsiteLink": "/californiareport",
"meta": {
"site": "news",
"source": "kqed",
"order": 8
},
"link": "/californiareport",
"subscribe": {
"apple": "https://podcasts.apple.com/us/podcast/kqeds-the-california-report/id79681292",
"google": "https://podcasts.google.com/feed/aHR0cHM6Ly9mZWVkcy5tZWdhcGhvbmUuZm0vS1FJTkM1MDAyODE4NTgz",
"npr": "https://www.npr.org/podcasts/432285393/the-california-report",
"stitcher": "https://www.stitcher.com/podcast/kqedfm-kqeds-the-california-report-podcast-8838",
"rss": "https://ww2.kqed.org/news/tag/tcram/feed/podcast"
}
},
"californiareportmagazine": {
"id": "californiareportmagazine",
"title": "The California Report Magazine",
"tagline": "Your state, your stories",
"info": "Every week, The California Report Magazine takes you on a road trip for the ears: to visit the places and meet the people who make California unique. The in-depth storytelling podcast from the California Report.",
"airtime": "FRI 4:30pm-5pm, 6:30pm-7pm, 11pm-11:30pm",
"imageSrc": "https://cdn.kqed.org/wp-content/uploads/2024/04/The-California-Report-Magazine-Podcast-Tile-703x703-1.jpg",
"imageAlt": "KQED The California Report Magazine",
"officialWebsiteLink": "/californiareportmagazine",
"meta": {
"site": "news",
"source": "kqed",
"order": 10
},
"link": "/californiareportmagazine",
"subscribe": {
"apple": "https://podcasts.apple.com/us/podcast/the-california-report-magazine/id1314750545",
"google": "https://podcasts.google.com/feed/aHR0cHM6Ly9mZWVkcy5tZWdhcGhvbmUuZm0vS1FJTkM3NjkwNjk1OTAz",
"npr": "https://www.npr.org/podcasts/564733126/the-california-report-magazine",
"stitcher": "https://www.stitcher.com/podcast/kqed/the-california-report-magazine",
"rss": "https://ww2.kqed.org/news/tag/tcrmag/feed/podcast"
}
},
"city-arts": {
"id": "city-arts",
"title": "City Arts & Lectures",
"info": "A one-hour radio program to hear celebrated writers, artists and thinkers address contemporary ideas and values, often discussing the creative process. Please note: tapes or transcripts are not available",
"imageSrc": "https://ww2.kqed.org/radio/wp-content/uploads/sites/50/2018/05/cityartsandlecture-300x300.jpg",
"officialWebsiteLink": "https://www.cityarts.net/",
"airtime": "SUN 1pm-2pm, TUE 10pm, WED 1am",
"meta": {
"site": "news",
"source": "City Arts & Lectures"
},
"link": "https://www.cityarts.net",
"subscribe": {
"tuneIn": "https://tunein.com/radio/City-Arts-and-Lectures-p692/",
"rss": "https://www.cityarts.net/feed/"
}
},
"closealltabs": {
"id": "closealltabs",
"title": "Close All Tabs",
"tagline": "Your irreverent guide to the trends redefining our world",
"info": "Close All Tabs breaks down how digital culture shapes our world through thoughtful insights and irreverent humor.",
"imageSrc": "https://cdn.kqed.org/wp-content/uploads/2025/02/CAT_2_Tile-scaled.jpg",
"imageAlt": "\"KQED Close All Tabs",
"officialWebsiteLink": "/podcasts/closealltabs",
"meta": {
"site": "news",
"source": "kqed",
"order": 1
},
"link": "/podcasts/closealltabs",
"subscribe": {
"apple": "https://podcasts.apple.com/us/podcast/close-all-tabs/id214663465",
"rss": "https://feeds.megaphone.fm/KQINC6993880386",
"amazon": "https://music.amazon.com/podcasts/92d9d4ac-67a3-4eed-b10a-fb45d45b1ef2/close-all-tabs",
"spotify": "https://open.spotify.com/show/6LAJFHnGK1pYXYzv6SIol6?si=deb0cae19813417c"
}
},
"code-switch-life-kit": {
"id": "code-switch-life-kit",
"title": "Code Switch / Life Kit",
"info": "\u003cem>Code Switch\u003c/em>, which listeners will hear in the first part of the hour, has fearless and much-needed conversations about race. Hosted by journalists of color, the show tackles the subject of race head-on, exploring how it impacts every part of society — from politics and pop culture to history, sports and more.\u003cbr />\u003cbr />\u003cem>Life Kit\u003c/em>, which will be in the second part of the hour, guides you through spaces and feelings no one prepares you for — from finances to mental health, from workplace microaggressions to imposter syndrome, from relationships to parenting. The show features experts with real world experience and shares their knowledge. Because everyone needs a little help being human.\u003cbr />\u003cbr />\u003ca href=\"https://www.npr.org/podcasts/510312/codeswitch\">\u003cem>Code Switch\u003c/em> offical site and podcast\u003c/a>\u003cbr />\u003ca href=\"https://www.npr.org/lifekit\">\u003cem>Life Kit\u003c/em> offical site and podcast\u003c/a>\u003cbr />",
"airtime": "SUN 9pm-10pm",
"imageSrc": "https://cdn.kqed.org/wp-content/uploads/2024/04/Code-Switch-Life-Kit-Podcast-Tile-360x360-1.jpg",
"meta": {
"site": "radio",
"source": "npr"
},
"link": "/radio/program/code-switch-life-kit",
"subscribe": {
"apple": "https://podcasts.apple.com/podcast/1112190608?mt=2&at=11l79Y&ct=nprdirectory",
"google": "https://podcasts.google.com/feed/aHR0cHM6Ly93d3cubnByLm9yZy9yc3MvcG9kY2FzdC5waHA_aWQ9NTEwMzEy",
"spotify": "https://open.spotify.com/show/3bExJ9JQpkwNhoHvaIIuyV",
"rss": "https://feeds.npr.org/510312/podcast.xml"
}
},
"commonwealth-club": {
"id": "commonwealth-club",
"title": "Commonwealth Club of California Podcast",
"info": "The Commonwealth Club of California is the nation's oldest and largest public affairs forum. As a non-partisan forum, The Club brings to the public airwaves diverse viewpoints on important topics. The Club's weekly radio broadcast - the oldest in the U.S., dating back to 1924 - is carried across the nation on public radio stations and is now podcasting. Our website archive features audio of our recent programs, as well as selected speeches from our long and distinguished history. This podcast feed is usually updated twice a week and is always un-edited.",
"airtime": "THU 10pm, FRI 1am",
"imageSrc": "https://cdn.kqed.org/wp-content/uploads/2024/04/Commonwealth-Club-Podcast-Tile-360x360-1.jpg",
"officialWebsiteLink": "https://www.commonwealthclub.org/podcasts",
"meta": {
"site": "news",
"source": "Commonwealth Club of California"
},
"link": "/radio/program/commonwealth-club",
"subscribe": {
"apple": "https://itunes.apple.com/us/podcast/commonwealth-club-of-california-podcast/id976334034?mt=2",
"google": "https://podcasts.google.com/feed/aHR0cDovL3d3dy5jb21tb253ZWFsdGhjbHViLm9yZy9hdWRpby9wb2RjYXN0L3dlZWtseS54bWw",
"tuneIn": "https://tunein.com/radio/Commonwealth-Club-of-California-p1060/"
}
},
"forum": {
"id": "forum",
"title": "Forum",
"tagline": "The conversation starts here",
"info": "KQED’s live call-in program discussing local, state, national and international issues, as well as in-depth interviews.",
"airtime": "MON-FRI 9am-11am, 10pm-11pm",
"imageSrc": "https://cdn.kqed.org/wp-content/uploads/2024/04/Forum-Podcast-Tile-703x703-1.jpg",
"imageAlt": "KQED Forum with Mina Kim and Alexis Madrigal",
"officialWebsiteLink": "/forum",
"meta": {
"site": "news",
"source": "kqed",
"order": 9
},
"link": "/forum",
"subscribe": {
"apple": "https://podcasts.apple.com/us/podcast/kqeds-forum/id73329719",
"google": "https://podcasts.google.com/feed/aHR0cHM6Ly9mZWVkcy5tZWdhcGhvbmUuZm0vS1FJTkM5NTU3MzgxNjMz",
"npr": "https://www.npr.org/podcasts/432307980/forum",
"stitcher": "https://www.stitcher.com/podcast/kqedfm-kqeds-forum-podcast",
"rss": "https://feeds.megaphone.fm/KQINC9557381633"
}
},
"freakonomics-radio": {
"id": "freakonomics-radio",
"title": "Freakonomics Radio",
"info": "Freakonomics Radio is a one-hour award-winning podcast and public-radio project hosted by Stephen Dubner, with co-author Steve Levitt as a regular guest. It is produced in partnership with WNYC.",
"imageSrc": "https://ww2.kqed.org/news/wp-content/uploads/sites/10/2018/05/freakonomicsRadio.png",
"officialWebsiteLink": "http://freakonomics.com/",
"airtime": "SUN 1am-2am, SAT 3pm-4pm",
"meta": {
"site": "radio",
"source": "WNYC"
},
"link": "/radio/program/freakonomics-radio",
"subscribe": {
"npr": "https://rpb3r.app.goo.gl/4s8b",
"apple": "https://itunes.apple.com/us/podcast/freakonomics-radio/id354668519",
"tuneIn": "https://tunein.com/podcasts/WNYC-Podcasts/Freakonomics-Radio-p272293/",
"rss": "https://feeds.feedburner.com/freakonomicsradio"
}
},
"fresh-air": {
"id": "fresh-air",
"title": "Fresh Air",
"info": "Hosted by Terry Gross, \u003cem>Fresh Air from WHYY\u003c/em> is the Peabody Award-winning weekday magazine of contemporary arts and issues. One of public radio's most popular programs, Fresh Air features intimate conversations with today's biggest luminaries.",
"airtime": "MON-FRI 7pm-8pm",
"imageSrc": "https://cdn.kqed.org/wp-content/uploads/2024/04/Fresh-Air-Podcast-Tile-360x360-1.jpg",
"officialWebsiteLink": "https://www.npr.org/programs/fresh-air/",
"meta": {
"site": "radio",
"source": "npr"
},
"link": "/radio/program/fresh-air",
"subscribe": {
"npr": "https://rpb3r.app.goo.gl/4s8b",
"apple": "https://itunes.apple.com/WebObjects/MZStore.woa/wa/viewPodcast?s=143441&mt=2&id=214089682&at=11l79Y&ct=nprdirectory",
"tuneIn": "https://tunein.com/radio/Fresh-Air-p17/",
"rss": "https://feeds.npr.org/381444908/podcast.xml"
}
},
"here-and-now": {
"id": "here-and-now",
"title": "Here & Now",
"info": "A live production of NPR and WBUR Boston, in collaboration with stations across the country, Here & Now reflects the fluid world of news as it's happening in the middle of the day, with timely, in-depth news, interviews and conversation. Hosted by Robin Young, Jeremy Hobson and Tonya Mosley.",
"airtime": "MON-THU 11am-12pm",
"imageSrc": "https://cdn.kqed.org/wp-content/uploads/2024/04/Here-And-Now-Podcast-Tile-360x360-1.jpg",
"officialWebsiteLink": "http://www.wbur.org/hereandnow",
"meta": {
"site": "news",
"source": "npr"
},
"link": "/radio/program/here-and-now",
"subsdcribe": {
"apple": "https://itunes.apple.com/WebObjects/MZStore.woa/wa/viewPodcast?mt=2&id=426698661",
"tuneIn": "https://tunein.com/radio/Here--Now-p211/",
"rss": "https://feeds.npr.org/510051/podcast.xml"
}
},
"hidden-brain": {
"id": "hidden-brain",
"title": "Hidden Brain",
"info": "Shankar Vedantam uses science and storytelling to reveal the unconscious patterns that drive human behavior, shape our choices and direct our relationships.",
"imageSrc": "https://ww2.kqed.org/radio/wp-content/uploads/sites/50/2018/05/hiddenbrain.jpg",
"officialWebsiteLink": "https://www.npr.org/series/423302056/hidden-brain",
"airtime": "SUN 7pm-8pm",
"meta": {
"site": "news",
"source": "NPR"
},
"link": "/radio/program/hidden-brain",
"subscribe": {
"apple": "https://itunes.apple.com/us/podcast/hidden-brain/id1028908750?mt=2",
"tuneIn": "https://tunein.com/podcasts/Science-Podcasts/Hidden-Brain-p787503/",
"rss": "https://feeds.npr.org/510308/podcast.xml"
}
},
"how-i-built-this": {
"id": "how-i-built-this",
"title": "How I Built This with Guy Raz",
"info": "Guy Raz dives into the stories behind some of the world's best known companies. How I Built This weaves a narrative journey about innovators, entrepreneurs and idealists—and the movements they built.",
"imageSrc": "https://ww2.kqed.org/news/wp-content/uploads/sites/10/2018/05/howIBuiltThis.png",
"officialWebsiteLink": "https://www.npr.org/podcasts/510313/how-i-built-this",
"airtime": "SUN 7:30pm-8pm",
"meta": {
"site": "news",
"source": "npr"
},
"link": "/radio/program/how-i-built-this",
"subscribe": {
"npr": "https://rpb3r.app.goo.gl/3zxy",
"apple": "https://itunes.apple.com/us/podcast/how-i-built-this-with-guy-raz/id1150510297?mt=2",
"tuneIn": "https://tunein.com/podcasts/Arts--Culture-Podcasts/How-I-Built-This-p910896/",
"rss": "https://feeds.npr.org/510313/podcast.xml"
}
},
"hyphenacion": {
"id": "hyphenacion",
"title": "Hyphenación",
"tagline": "Where conversation and cultura meet",
"info": "What kind of no sabo word is Hyphenación? For us, it’s about living within a hyphenation. Like being a third-gen Mexican-American from the Texas border now living that Bay Area Chicano life. Like Xorje! Each week we bring together a couple of hyphenated Latinos to talk all about personal life choices: family, careers, relationships, belonging … everything is on the table. ",
"imageSrc": "https://cdn.kqed.org/wp-content/uploads/2025/03/Hyphenacion_FinalAssets_PodcastTile.png",
"imageAlt": "KQED Hyphenación",
"officialWebsiteLink": "/podcasts/hyphenacion",
"meta": {
"site": "news",
"source": "kqed",
"order": 15
},
"link": "/podcasts/hyphenacion",
"subscribe": {
"apple": "https://podcasts.apple.com/us/podcast/hyphenaci%C3%B3n/id1191591838",
"spotify": "https://open.spotify.com/show/2p3Fifq96nw9BPcmFdIq0o?si=39209f7b25774f38",
"youtube": "https://www.youtube.com/c/kqedarts",
"amazon": "https://music.amazon.com/podcasts/6c3dd23c-93fb-4aab-97ba-1725fa6315f1/hyphenaci%C3%B3n",
"rss": "https://feeds.megaphone.fm/KQINC2275451163"
}
},
"jerrybrown": {
"id": "jerrybrown",
"title": "The Political Mind of Jerry Brown",
"tagline": "Lessons from a lifetime in politics",
"info": "The Political Mind of Jerry Brown brings listeners the wisdom of the former Governor, Mayor, and presidential candidate. Scott Shafer interviewed Brown for more than 40 hours, covering the former governor's life and half-century in the political game and Brown has some lessons he'd like to share. ",
"imageSrc": "https://cdn.kqed.org/wp-content/uploads/2024/04/The-Political-Mind-of-Jerry-Brown-Podcast-Tile-703x703-1.jpg",
"imageAlt": "KQED The Political Mind of Jerry Brown",
"officialWebsiteLink": "/podcasts/jerrybrown",
"meta": {
"site": "news",
"source": "kqed",
"order": 18
},
"link": "/podcasts/jerrybrown",
"subscribe": {
"npr": "https://www.npr.org/podcasts/790253322/the-political-mind-of-jerry-brown",
"apple": "https://itunes.apple.com/us/podcast/id1492194549",
"rss": "https://ww2.kqed.org/news/series/jerrybrown/feed/podcast/",
"tuneIn": "http://tun.in/pjGcK",
"stitcher": "https://www.stitcher.com/podcast/kqed/the-political-mind-of-jerry-brown",
"spotify": "https://open.spotify.com/show/54C1dmuyFyKMFttY6X2j6r?si=K8SgRCoISNK6ZbjpXrX5-w",
"google": "https://podcasts.google.com/feed/aHR0cHM6Ly93dzIua3FlZC5vcmcvbmV3cy9zZXJpZXMvamVycnlicm93bi9mZWVkL3BvZGNhc3Qv"
}
},
"latino-usa": {
"id": "latino-usa",
"title": "Latino USA",
"airtime": "MON 1am-2am, SUN 6pm-7pm",
"info": "Latino USA, the radio journal of news and culture, is the only national, English-language radio program produced from a Latino perspective.",
"imageSrc": "https://ww2.kqed.org/radio/wp-content/uploads/sites/50/2018/04/latinoUsa.jpg",
"officialWebsiteLink": "http://latinousa.org/",
"meta": {
"site": "news",
"source": "npr"
},
"link": "/radio/program/latino-usa",
"subscribe": {
"npr": "https://rpb3r.app.goo.gl/xtTd",
"apple": "https://itunes.apple.com/WebObjects/MZStore.woa/wa/viewPodcast?s=143441&mt=2&id=79681317&at=11l79Y&ct=nprdirectory",
"tuneIn": "https://tunein.com/radio/Latino-USA-p621/",
"rss": "https://feeds.npr.org/510016/podcast.xml"
}
},
"marketplace": {
"id": "marketplace",
"title": "Marketplace",
"info": "Our flagship program, helmed by Kai Ryssdal, examines what the day in money delivered, through stories, conversations, newsworthy numbers and more. Updated Monday through Friday at about 3:30 p.m. PT.",
"airtime": "MON-FRI 4pm-4:30pm, MON-WED 6:30pm-7pm",
"imageSrc": "https://cdn.kqed.org/wp-content/uploads/2024/04/Marketplace-Podcast-Tile-360x360-1.jpg",
"officialWebsiteLink": "https://www.marketplace.org/",
"meta": {
"site": "news",
"source": "American Public Media"
},
"link": "/radio/program/marketplace",
"subscribe": {
"apple": "https://itunes.apple.com/WebObjects/MZStore.woa/wa/viewPodcast?s=143441&mt=2&id=201853034&at=11l79Y&ct=nprdirectory",
"tuneIn": "https://tunein.com/radio/APM-Marketplace-p88/",
"rss": "https://feeds.publicradio.org/public_feeds/marketplace-pm/rss/rss"
}
},
"masters-of-scale": {
"id": "masters-of-scale",
"title": "Masters of Scale",
"info": "Masters of Scale is an original podcast in which LinkedIn co-founder and Greylock Partner Reid Hoffman sets out to describe and prove theories that explain how great entrepreneurs take their companies from zero to a gazillion in ingenious fashion.",
"airtime": "Every other Wednesday June 12 through October 16 at 8pm (repeats Thursdays at 2am)",
"imageSrc": "https://cdn.kqed.org/wp-content/uploads/2024/04/Masters-of-Scale-Podcast-Tile-360x360-1.jpg",
"officialWebsiteLink": "https://mastersofscale.com/",
"meta": {
"site": "radio",
"source": "WaitWhat"
},
"link": "/radio/program/masters-of-scale",
"subscribe": {
"apple": "http://mastersofscale.app.link/",
"rss": "https://rss.art19.com/masters-of-scale"
}
},
"mindshift": {
"id": "mindshift",
"title": "MindShift",
"tagline": "A podcast about the future of learning and how we raise our kids",
"info": "The MindShift podcast explores the innovations in education that are shaping how kids learn. Hosts Ki Sung and Katrina Schwartz introduce listeners to educators, researchers, parents and students who are developing effective ways to improve how kids learn. We cover topics like how fed-up administrators are developing surprising tactics to deal with classroom disruptions; how listening to podcasts are helping kids develop reading skills; the consequences of overparenting; and why interdisciplinary learning can engage students on all ends of the traditional achievement spectrum. This podcast is part of the MindShift education site, a division of KQED News. KQED is an NPR/PBS member station based in San Francisco. You can also visit the MindShift website for episodes and supplemental blog posts or tweet us \u003ca href=\"https://twitter.com/MindShiftKQED\">@MindShiftKQED\u003c/a> or visit us at \u003ca href=\"/mindshift\">MindShift.KQED.org\u003c/a>",
"imageSrc": "https://cdn.kqed.org/wp-content/uploads/2024/04/Mindshift-Podcast-Tile-703x703-1.jpg",
"imageAlt": "KQED MindShift: How We Will Learn",
"officialWebsiteLink": "/mindshift/",
"meta": {
"site": "news",
"source": "kqed",
"order": 12
},
"link": "/podcasts/mindshift",
"subscribe": {
"apple": "https://podcasts.apple.com/us/podcast/mindshift-podcast/id1078765985",
"google": "https://podcasts.google.com/feed/aHR0cHM6Ly9mZWVkcy5tZWdhcGhvbmUuZm0vS1FJTkM1NzY0NjAwNDI5",
"npr": "https://www.npr.org/podcasts/464615685/mind-shift-podcast",
"stitcher": "https://www.stitcher.com/podcast/kqed/stories-teachers-share",
"spotify": "https://open.spotify.com/show/0MxSpNYZKNprFLCl7eEtyx"
}
},
"morning-edition": {
"id": "morning-edition",
"title": "Morning Edition",
"info": "\u003cem>Morning Edition\u003c/em> takes listeners around the country and the world with multi-faceted stories and commentaries every weekday. Hosts Steve Inskeep, David Greene and Rachel Martin bring you the latest breaking news and features to prepare you for the day.",
"airtime": "MON-FRI 3am-9am",
"imageSrc": "https://cdn.kqed.org/wp-content/uploads/2024/04/Morning-Edition-Podcast-Tile-360x360-1.jpg",
"officialWebsiteLink": "https://www.npr.org/programs/morning-edition/",
"meta": {
"site": "news",
"source": "npr"
},
"link": "/radio/program/morning-edition"
},
"onourwatch": {
"id": "onourwatch",
"title": "On Our Watch",
"tagline": "Deeply-reported investigative journalism",
"info": "For decades, the process for how police police themselves has been inconsistent – if not opaque. In some states, like California, these proceedings were completely hidden. After a new police transparency law unsealed scores of internal affairs files, our reporters set out to examine these cases and the shadow world of police discipline. On Our Watch brings listeners into the rooms where officers are questioned and witnesses are interrogated to find out who this system is really protecting. Is it the officers, or the public they've sworn to serve?",
"imageSrc": "https://cdn.kqed.org/wp-content/uploads/2024/04/On-Our-Watch-Podcast-Tile-703x703-1.jpg",
"imageAlt": "On Our Watch from NPR and KQED",
"officialWebsiteLink": "/podcasts/onourwatch",
"meta": {
"site": "news",
"source": "kqed",
"order": 11
},
"link": "/podcasts/onourwatch",
"subscribe": {
"apple": "https://podcasts.apple.com/podcast/id1567098962",
"google": "https://podcasts.google.com/feed/aHR0cHM6Ly9mZWVkcy5ucHIub3JnLzUxMDM2MC9wb2RjYXN0LnhtbD9zYz1nb29nbGVwb2RjYXN0cw",
"npr": "https://rpb3r.app.goo.gl/onourwatch",
"spotify": "https://open.spotify.com/show/0OLWoyizopu6tY1XiuX70x",
"tuneIn": "https://tunein.com/radio/On-Our-Watch-p1436229/",
"stitcher": "https://www.stitcher.com/show/on-our-watch",
"rss": "https://feeds.npr.org/510360/podcast.xml"
}
},
"on-the-media": {
"id": "on-the-media",
"title": "On The Media",
"info": "Our weekly podcast explores how the media 'sausage' is made, casts an incisive eye on fluctuations in the marketplace of ideas, and examines threats to the freedom of information and expression in America and abroad. For one hour a week, the show tries to lift the veil from the process of \"making media,\" especially news media, because it's through that lens that we see the world and the world sees us",
"airtime": "SUN 2pm-3pm, MON 12am-1am",
"imageSrc": "https://ww2.kqed.org/radio/wp-content/uploads/sites/50/2018/04/onTheMedia.png",
"officialWebsiteLink": "https://www.wnycstudios.org/shows/otm",
"meta": {
"site": "news",
"source": "wnyc"
},
"link": "/radio/program/on-the-media",
"subscribe": {
"apple": "https://itunes.apple.com/us/podcast/on-the-media/id73330715?mt=2",
"tuneIn": "https://tunein.com/radio/On-the-Media-p69/",
"rss": "http://feeds.wnyc.org/onthemedia"
}
},
"pbs-newshour": {
"id": "pbs-newshour",
"title": "PBS NewsHour",
"info": "Analysis, background reports and updates from the PBS NewsHour putting today's news in context.",
"airtime": "MON-FRI 3pm-4pm",
"imageSrc": "https://cdn.kqed.org/wp-content/uploads/2024/04/PBS-News-Hour-Podcast-Tile-360x360-1.jpg",
"officialWebsiteLink": "https://www.pbs.org/newshour/",
"meta": {
"site": "news",
"source": "pbs"
},
"link": "/radio/program/pbs-newshour",
"subscribe": {
"apple": "https://itunes.apple.com/us/podcast/pbs-newshour-full-show/id394432287?mt=2",
"tuneIn": "https://tunein.com/radio/PBS-NewsHour---Full-Show-p425698/",
"rss": "https://www.pbs.org/newshour/feeds/rss/podcasts/show"
}
},
"perspectives": {
"id": "perspectives",
"title": "Perspectives",
"tagline": "KQED's series of daily listener commentaries since 1991",
"info": "KQED's series of daily listener commentaries since 1991.",
"imageSrc": "https://cdn.kqed.org/wp-content/uploads/2025/01/Perspectives_Tile_Final.jpg",
"officialWebsiteLink": "/perspectives/",
"meta": {
"site": "radio",
"source": "kqed",
"order": 14
},
"link": "/perspectives",
"subscribe": {
"apple": "https://podcasts.apple.com/us/podcast/id73801135",
"npr": "https://www.npr.org/podcasts/432309616/perspectives",
"rss": "https://ww2.kqed.org/perspectives/category/perspectives/feed/",
"google": "https://podcasts.google.com/feed/aHR0cHM6Ly93dzIua3FlZC5vcmcvcGVyc3BlY3RpdmVzL2NhdGVnb3J5L3BlcnNwZWN0aXZlcy9mZWVkLw"
}
},
"planet-money": {
"id": "planet-money",
"title": "Planet Money",
"info": "The economy explained. Imagine you could call up a friend and say, Meet me at the bar and tell me what's going on with the economy. Now imagine that's actually a fun evening.",
"airtime": "SUN 3pm-4pm",
"imageSrc": "https://ww2.kqed.org/radio/wp-content/uploads/sites/50/2018/04/planetmoney.jpg",
"officialWebsiteLink": "https://www.npr.org/sections/money/",
"meta": {
"site": "news",
"source": "npr"
},
"link": "/radio/program/planet-money",
"subscribe": {
"npr": "https://rpb3r.app.goo.gl/M4f5",
"apple": "https://itunes.apple.com/us/podcast/planet-money/id290783428?mt=2",
"tuneIn": "https://tunein.com/podcasts/Business--Economics-Podcasts/Planet-Money-p164680/",
"rss": "https://feeds.npr.org/510289/podcast.xml"
}
},
"politicalbreakdown": {
"id": "politicalbreakdown",
"title": "Political Breakdown",
"tagline": "Politics from a personal perspective",
"info": "Political Breakdown is a new series that explores the political intersection of California and the nation. Each week hosts Scott Shafer and Marisa Lagos are joined with a new special guest to unpack politics -- with personality — and offer an insider’s glimpse at how politics happens.",
"airtime": "THU 6:30pm-7pm",
"imageSrc": "https://cdn.kqed.org/wp-content/uploads/2024/04/Political-Breakdown-2024-Podcast-Tile-703x703-1.jpg",
"imageAlt": "KQED Political Breakdown",
"officialWebsiteLink": "/podcasts/politicalbreakdown",
"meta": {
"site": "radio",
"source": "kqed",
"order": 5
},
"link": "/podcasts/politicalbreakdown",
"subscribe": {
"apple": "https://podcasts.apple.com/us/podcast/political-breakdown/id1327641087",
"google": "https://podcasts.google.com/feed/aHR0cHM6Ly9mZWVkcy5tZWdhcGhvbmUuZm0vS1FJTkM5Nzk2MzI2MTEx",
"npr": "https://www.npr.org/podcasts/572155894/political-breakdown",
"stitcher": "https://www.stitcher.com/podcast/kqed/political-breakdown",
"spotify": "https://open.spotify.com/show/07RVyIjIdk2WDuVehvBMoN",
"rss": "https://ww2.kqed.org/news/tag/political-breakdown/feed/podcast"
}
},
"possible": {
"id": "possible",
"title": "Possible",
"info": "Possible is hosted by entrepreneur Reid Hoffman and writer Aria Finger. Together in Possible, Hoffman and Finger lead enlightening discussions about building a brighter collective future. The show features interviews with visionary guests like Trevor Noah, Sam Altman and Janette Sadik-Khan. Possible paints an optimistic portrait of the world we can create through science, policy, business, art and our shared humanity. It asks: What if everything goes right for once? How can we get there? Each episode also includes a short fiction story generated by advanced AI GPT-4, serving as a thought-provoking springboard to speculate how humanity could leverage technology for good.",
"airtime": "SUN 2pm",
"imageSrc": "https://cdn.kqed.org/wp-content/uploads/2024/04/Possible-Podcast-Tile-360x360-1.jpg",
"officialWebsiteLink": "https://www.possible.fm/",
"meta": {
"site": "news",
"source": "Possible"
},
"link": "/radio/program/possible",
"subscribe": {
"apple": "https://podcasts.apple.com/us/podcast/possible/id1677184070",
"spotify": "https://open.spotify.com/show/730YpdUSNlMyPQwNnyjp4k"
}
},
"pri-the-world": {
"id": "pri-the-world",
"title": "PRI's The World: Latest Edition",
"info": "Each weekday, host Marco Werman and his team of producers bring you the world's most interesting stories in an hour of radio that reminds us just how small our planet really is.",
"airtime": "MON-FRI 2pm-3pm",
"imageSrc": "https://cdn.kqed.org/wp-content/uploads/2024/04/The-World-Podcast-Tile-360x360-1.jpg",
"officialWebsiteLink": "https://www.pri.org/programs/the-world",
"meta": {
"site": "news",
"source": "PRI"
},
"link": "/radio/program/pri-the-world",
"subscribe": {
"apple": "https://itunes.apple.com/us/podcast/pris-the-world-latest-edition/id278196007?mt=2",
"tuneIn": "https://tunein.com/podcasts/News--Politics-Podcasts/PRIs-The-World-p24/",
"rss": "http://feeds.feedburner.com/pri/theworld"
}
},
"radiolab": {
"id": "radiolab",
"title": "Radiolab",
"info": "A two-time Peabody Award-winner, Radiolab is an investigation told through sounds and stories, and centered around one big idea. In the Radiolab world, information sounds like music and science and culture collide. Hosted by Jad Abumrad and Robert Krulwich, the show is designed for listeners who demand skepticism, but appreciate wonder. WNYC Studios is the producer of other leading podcasts including Freakonomics Radio, Death, Sex & Money, On the Media and many more.",
"airtime": "SUN 12am-1am, SAT 2pm-3pm",
"imageSrc": "https://ww2.kqed.org/radio/wp-content/uploads/sites/50/2018/04/radiolab1400.png",
"officialWebsiteLink": "https://www.wnycstudios.org/shows/radiolab/",
"meta": {
"site": "science",
"source": "WNYC"
},
"link": "/radio/program/radiolab",
"subscribe": {
"apple": "https://itunes.apple.com/us/podcast/radiolab/id152249110?mt=2",
"tuneIn": "https://tunein.com/radio/RadioLab-p68032/",
"rss": "https://feeds.wnyc.org/radiolab"
}
},
"reveal": {
"id": "reveal",
"title": "Reveal",
"info": "Created by The Center for Investigative Reporting and PRX, Reveal is public radios first one-hour weekly radio show and podcast dedicated to investigative reporting. Credible, fact based and without a partisan agenda, Reveal combines the power and artistry of driveway moment storytelling with data-rich reporting on critically important issues. The result is stories that inform and inspire, arming our listeners with information to right injustices, hold the powerful accountable and improve lives.Reveal is hosted by Al Letson and showcases the award-winning work of CIR and newsrooms large and small across the nation. In a radio and podcast market crowded with choices, Reveal focuses on important and often surprising stories that illuminate the world for our listeners.",
"airtime": "SAT 4pm-5pm",
"imageSrc": "https://ww2.kqed.org/radio/wp-content/uploads/sites/50/2018/04/reveal300px.png",
"officialWebsiteLink": "https://www.revealnews.org/episodes/",
"meta": {
"site": "news",
"source": "npr"
},
"link": "/radio/program/reveal",
"subscribe": {
"apple": "https://itunes.apple.com/us/podcast/reveal/id886009669",
"tuneIn": "https://tunein.com/radio/Reveal-p679597/",
"rss": "http://feeds.revealradio.org/revealpodcast"
}
},
"rightnowish": {
"id": "rightnowish",
"title": "Rightnowish",
"tagline": "Art is where you find it",
"info": "Rightnowish digs into life in the Bay Area right now… ish. Journalist Pendarvis Harshaw takes us to galleries painted on the sides of liquor stores in West Oakland. We'll dance in warehouses in the Bayview, make smoothies with kids in South Berkeley, and listen to classical music in a 1984 Cutlass Supreme in Richmond. Every week, Pen talks to movers and shakers about how the Bay Area shapes what they create, and how they shape the place we call home.",
"imageSrc": "https://cdn.kqed.org/wp-content/uploads/2024/04/Rightnowish-Podcast-Tile-500x500-1.jpg",
"imageAlt": "KQED Rightnowish with Pendarvis Harshaw",
"officialWebsiteLink": "/podcasts/rightnowish",
"meta": {
"site": "arts",
"source": "kqed",
"order": 16
},
"link": "/podcasts/rightnowish",
"subscribe": {
"npr": "https://www.npr.org/podcasts/721590300/rightnowish",
"rss": "https://ww2.kqed.org/arts/programs/rightnowish/feed/podcast",
"apple": "https://podcasts.apple.com/us/podcast/rightnowish/id1482187648",
"stitcher": "https://www.stitcher.com/podcast/kqed/rightnowish",
"google": "https://podcasts.google.com/feed/aHR0cHM6Ly9mZWVkcy5tZWdhcGhvbmUuZm0vS1FJTkMxMjU5MTY3NDc4",
"spotify": "https://open.spotify.com/show/7kEJuafTzTVan7B78ttz1I"
}
},
"science-friday": {
"id": "science-friday",
"title": "Science Friday",
"info": "Science Friday is a weekly science talk show, broadcast live over public radio stations nationwide. Each week, the show focuses on science topics that are in the news and tries to bring an educated, balanced discussion to bear on the scientific issues at hand. Panels of expert guests join host Ira Flatow, a veteran science journalist, to discuss science and to take questions from listeners during the call-in portion of the program.",
"airtime": "FRI 11am-1pm",
"imageSrc": "https://cdn.kqed.org/wp-content/uploads/2024/04/Science-Friday-Podcast-Tile-360x360-1.jpg",
"officialWebsiteLink": "https://www.wnycstudios.org/shows/science-friday",
"meta": {
"site": "news",
"source": "npr"
},
"link": "/radio/program/science-friday",
"subscribe": {
"apple": "https://itunes.apple.com/WebObjects/MZStore.woa/wa/viewPodcast?s=143441&mt=2&id=73329284&at=11l79Y&ct=nprdirectory",
"tuneIn": "https://tunein.com/radio/Science-Friday-p394/",
"rss": "http://feeds.wnyc.org/science-friday"
}
},
"snap-judgment": {
"id": "snap-judgment",
"title": "Snap Judgment",
"tagline": "Real stories with killer beats",
"info": "The Snap Judgment radio show and podcast mixes real stories with killer beats to produce cinematic, dramatic radio. Snap's musical brand of storytelling dares listeners to see the world through the eyes of another. This is storytelling... with a BEAT!! Snap first aired on public radio stations nationwide in July 2010. Today, Snap Judgment airs on over 450 public radio stations and is brought to the airwaves by KQED & PRX.",
"airtime": "SAT 1pm-2pm, 9pm-10pm",
"imageSrc": "https://cdn.kqed.org/wp-content/uploads/2024/05/Snap-Judgment-Podcast-Tile-703x703-1.jpg",
"officialWebsiteLink": "https://snapjudgment.org",
"meta": {
"site": "arts",
"source": "kqed",
"order": 4
},
"link": "https://snapjudgment.org",
"subscribe": {
"apple": "https://podcasts.apple.com/us/podcast/snap-judgment/id283657561",
"npr": "https://www.npr.org/podcasts/449018144/snap-judgment",
"stitcher": "https://www.pandora.com/podcast/snap-judgment/PC:241?source=stitcher-sunset",
"spotify": "https://open.spotify.com/show/3Cct7ZWmxHNAtLgBTqjC5v",
"rss": "https://snap.feed.snapjudgment.org/"
}
},
"soldout": {
"id": "soldout",
"title": "SOLD OUT: Rethinking Housing in America",
"tagline": "A new future for housing",
"info": "Sold Out: Rethinking Housing in America",
"imageSrc": "https://cdn.kqed.org/wp-content/uploads/2024/04/Sold-Out-Podcast-Tile-703x703-1.jpg",
"imageAlt": "KQED Sold Out: Rethinking Housing in America",
"officialWebsiteLink": "/podcasts/soldout",
"meta": {
"site": "news",
"source": "kqed",
"order": 13
},
"link": "/podcasts/soldout",
"subscribe": {
"npr": "https://www.npr.org/podcasts/911586047/s-o-l-d-o-u-t-a-new-future-for-housing",
"apple": "https://podcasts.apple.com/us/podcast/introducing-sold-out-rethinking-housing-in-america/id1531354937",
"rss": "https://feeds.megaphone.fm/soldout",
"spotify": "https://open.spotify.com/show/38dTBSk2ISFoPiyYNoKn1X",
"stitcher": "https://www.stitcher.com/podcast/kqed/sold-out-rethinking-housing-in-america",
"tunein": "https://tunein.com/radio/SOLD-OUT-Rethinking-Housing-in-America-p1365871/",
"google": "https://podcasts.google.com/feed/aHR0cHM6Ly9mZWVkcy5tZWdhcGhvbmUuZm0vc29sZG91dA"
}
},
"spooked": {
"id": "spooked",
"title": "Spooked",
"tagline": "True-life supernatural stories",
"info": "",
"imageSrc": "https://cdn.kqed.org/wp-content/uploads/2024/10/Spooked-Podcast-Tile-703x703-1.jpg",
"imageAlt": "",
"officialWebsiteLink": "https://spookedpodcast.org/",
"meta": {
"site": "news",
"source": "kqed",
"order": 7
},
"link": "https://spookedpodcast.org/",
"subscribe": {
"apple": "https://podcasts.apple.com/us/podcast/spooked/id1279361017",
"npr": "https://www.npr.org/podcasts/549547848/snap-judgment-presents-spooked",
"spotify": "https://open.spotify.com/show/76571Rfl3m7PLJQZKQIGCT",
"rss": "https://feeds.simplecast.com/TBotaapn"
}
},
"tech-nation": {
"id": "tech-nation",
"title": "Tech Nation Radio Podcast",
"info": "Tech Nation is a weekly public radio program, hosted by Dr. Moira Gunn. Founded in 1993, it has grown from a simple interview show to a multi-faceted production, featuring conversations with noted technology and science leaders, and a weekly science and technology-related commentary.",
"airtime": "FRI 10pm",
"imageSrc": "https://cdn.kqed.org/wp-content/uploads/2024/04/Tech-Nation-Radio-Podcast-Tile-360x360-1.jpg",
"officialWebsiteLink": "http://technation.podomatic.com/",
"meta": {
"site": "science",
"source": "Tech Nation Media"
},
"link": "/radio/program/tech-nation",
"subscribe": {
"rss": "https://technation.podomatic.com/rss2.xml"
}
},
"ted-radio-hour": {
"id": "ted-radio-hour",
"title": "TED Radio Hour",
"info": "The TED Radio Hour is a journey through fascinating ideas, astonishing inventions, fresh approaches to old problems, and new ways to think and create.",
"airtime": "SUN 3pm-4pm, SAT 10pm-11pm",
"imageSrc": "https://ww2.kqed.org/radio/wp-content/uploads/sites/50/2018/04/tedRadioHour.jpg",
"officialWebsiteLink": "https://www.npr.org/programs/ted-radio-hour/?showDate=2018-06-22",
"meta": {
"site": "news",
"source": "npr"
},
"link": "/radio/program/ted-radio-hour",
"subscribe": {
"npr": "https://rpb3r.app.goo.gl/8vsS",
"apple": "https://itunes.apple.com/WebObjects/MZStore.woa/wa/viewPodcast?s=143441&mt=2&id=523121474&at=11l79Y&ct=nprdirectory",
"tuneIn": "https://tunein.com/radio/TED-Radio-Hour-p418021/",
"rss": "https://feeds.npr.org/510298/podcast.xml"
}
},
"thebay": {
"id": "thebay",
"title": "The Bay",
"tagline": "Local news to keep you rooted",
"info": "Host Devin Katayama walks you through the biggest story of the day with reporters and newsmakers.",
"imageSrc": "https://cdn.kqed.org/wp-content/uploads/2024/04/The-Bay-Podcast-Tile-703x703-1.jpg",
"imageAlt": "KQED The Bay",
"officialWebsiteLink": "/podcasts/thebay",
"meta": {
"site": "radio",
"source": "kqed",
"order": 2
},
"link": "/podcasts/thebay",
"subscribe": {
"apple": "https://podcasts.apple.com/us/podcast/the-bay/id1350043452",
"google": "https://podcasts.google.com/feed/aHR0cHM6Ly9mZWVkcy5tZWdhcGhvbmUuZm0vS1FJTkM4MjU5Nzg2MzI3",
"npr": "https://www.npr.org/podcasts/586725995/the-bay",
"stitcher": "https://www.stitcher.com/podcast/kqed/the-bay",
"spotify": "https://open.spotify.com/show/4BIKBKIujizLHlIlBNaAqQ",
"rss": "https://feeds.megaphone.fm/KQINC8259786327"
}
},
"thelatest": {
"id": "thelatest",
"title": "The Latest",
"tagline": "Trusted local news in real time",
"info": "",
"imageSrc": "https://cdn.kqed.org/wp-content/uploads/2025/05/The-Latest-2025-Podcast-Tile-703x703-1.jpg",
"imageAlt": "KQED The Latest",
"officialWebsiteLink": "/thelatest",
"meta": {
"site": "news",
"source": "kqed",
"order": 6
},
"link": "/thelatest",
"subscribe": {
"apple": "https://podcasts.apple.com/us/podcast/the-latest-from-kqed/id1197721799",
"npr": "https://www.npr.org/podcasts/1257949365/the-latest-from-k-q-e-d",
"spotify": "https://open.spotify.com/show/5KIIXMgM9GTi5AepwOYvIZ?si=bd3053fec7244dba",
"rss": "https://feeds.megaphone.fm/KQINC9137121918"
}
},
"theleap": {
"id": "theleap",
"title": "The Leap",
"tagline": "What if you closed your eyes, and jumped?",
"info": "Stories about people making dramatic, risky changes, told by award-winning public radio reporter Judy Campbell.",
"imageSrc": "https://cdn.kqed.org/wp-content/uploads/2024/04/The-Leap-Podcast-Tile-703x703-1.jpg",
"imageAlt": "KQED The Leap",
"officialWebsiteLink": "/podcasts/theleap",
"meta": {
"site": "news",
"source": "kqed",
"order": 17
},
"link": "/podcasts/theleap",
"subscribe": {
"apple": "https://podcasts.apple.com/us/podcast/the-leap/id1046668171",
"google": "https://podcasts.google.com/feed/aHR0cHM6Ly9mZWVkcy5tZWdhcGhvbmUuZm0vS1FJTkM0NTcwODQ2MjY2",
"npr": "https://www.npr.org/podcasts/447248267/the-leap",
"stitcher": "https://www.stitcher.com/podcast/kqed/the-leap",
"spotify": "https://open.spotify.com/show/3sSlVHHzU0ytLwuGs1SD1U",
"rss": "https://ww2.kqed.org/news/programs/the-leap/feed/podcast"
}
},
"the-moth-radio-hour": {
"id": "the-moth-radio-hour",
"title": "The Moth Radio Hour",
"info": "Since its launch in 1997, The Moth has presented thousands of true stories, told live and without notes, to standing-room-only crowds worldwide. Moth storytellers stand alone, under a spotlight, with only a microphone and a roomful of strangers. The storyteller and the audience embark on a high-wire act of shared experience which is both terrifying and exhilarating. Since 2008, The Moth podcast has featured many of our favorite stories told live on Moth stages around the country. For information on all of our programs and live events, visit themoth.org.",
"airtime": "SAT 8pm-9pm and SUN 11am-12pm",
"imageSrc": "https://ww2.kqed.org/radio/wp-content/uploads/sites/50/2018/04/theMoth.jpg",
"officialWebsiteLink": "https://themoth.org/",
"meta": {
"site": "arts",
"source": "prx"
},
"link": "/radio/program/the-moth-radio-hour",
"subscribe": {
"apple": "https://itunes.apple.com/us/podcast/the-moth-podcast/id275699983?mt=2",
"tuneIn": "https://tunein.com/radio/The-Moth-p273888/",
"rss": "http://feeds.themoth.org/themothpodcast"
}
},
"the-new-yorker-radio-hour": {
"id": "the-new-yorker-radio-hour",
"title": "The New Yorker Radio Hour",
"info": "The New Yorker Radio Hour is a weekly program presented by the magazine's editor, David Remnick, and produced by WNYC Studios and The New Yorker. Each episode features a diverse mix of interviews, profiles, storytelling, and an occasional burst of humor inspired by the magazine, and shaped by its writers, artists, and editors. This isn't a radio version of a magazine, but something all its own, reflecting the rich possibilities of audio storytelling and conversation. Theme music for the show was composed and performed by Merrill Garbus of tUnE-YArDs.",
"airtime": "SAT 10am-11am",
"imageSrc": "https://cdn.kqed.org/wp-content/uploads/2024/04/The-New-Yorker-Podcast-Tile-360x360-1.jpg",
"officialWebsiteLink": "https://www.wnycstudios.org/shows/tnyradiohour",
"meta": {
"site": "arts",
"source": "WNYC"
},
"link": "/radio/program/the-new-yorker-radio-hour",
"subscribe": {
"apple": "https://itunes.apple.com/us/podcast/id1050430296",
"tuneIn": "https://tunein.com/podcasts/WNYC-Podcasts/New-Yorker-Radio-Hour-p803804/",
"rss": "https://feeds.feedburner.com/newyorkerradiohour"
}
},
"the-sam-sanders-show": {
"id": "the-sam-sanders-show",
"title": "The Sam Sanders Show",
"info": "One of public radio's most dynamic voices, Sam Sanders helped launch The NPR Politics Podcast and hosted NPR's hit show It's Been A Minute. Now, the award-winning host returns with something brand new, The Sam Sanders Show. Every week, Sam Sanders and friends dig into the culture that shapes our lives: what's driving the biggest trends, how artists really think, and even the memes you can't stop scrolling past. Sam is beloved for his way of unpacking the world and bringing you up close to fresh currents and engaging conversations. The Sam Sanders Show is smart, funny and always a good time.",
"airtime": "FRI 12-1pm AND SAT 11am-12pm",
"imageSrc": "https://cdn.kqed.org/wp-content/uploads/2025/11/The-Sam-Sanders-Show-Podcast-Tile-400x400-1.jpg",
"officialWebsiteLink": "https://www.kcrw.com/shows/the-sam-sanders-show/latest",
"meta": {
"site": "arts",
"source": "KCRW"
},
"link": "https://www.kcrw.com/shows/the-sam-sanders-show/latest",
"subscribe": {
"rss": "https://feed.cdnstream1.com/zjb/feed/download/ac/28/59/ac28594c-e1d0-4231-8728-61865cdc80e8.xml"
}
},
"the-splendid-table": {
"id": "the-splendid-table",
"title": "The Splendid Table",
"info": "\u003cem>The Splendid Table\u003c/em> hosts our nation's conversations about cooking, sustainability and food culture.",
"imageSrc": "https://cdn.kqed.org/wp-content/uploads/2024/04/The-Splendid-Table-Podcast-Tile-360x360-1.jpg",
"officialWebsiteLink": "https://www.splendidtable.org/",
"airtime": "SUN 10-11 pm",
"meta": {
"site": "radio",
"source": "npr"
},
"link": "/radio/program/the-splendid-table"
},
"this-american-life": {
"id": "this-american-life",
"title": "This American Life",
"info": "This American Life is a weekly public radio show, heard by 2.2 million people on more than 500 stations. Another 2.5 million people download the weekly podcast. It is hosted by Ira Glass, produced in collaboration with Chicago Public Media, delivered to stations by PRX The Public Radio Exchange, and has won all of the major broadcasting awards.",
"airtime": "SAT 12pm-1pm, 7pm-8pm",
"imageSrc": "https://ww2.kqed.org/radio/wp-content/uploads/sites/50/2018/04/thisAmericanLife.png",
"officialWebsiteLink": "https://www.thisamericanlife.org/",
"meta": {
"site": "news",
"source": "wbez"
},
"link": "/radio/program/this-american-life",
"subscribe": {
"apple": "https://itunes.apple.com/WebObjects/MZStore.woa/wa/viewPodcast?s=143441&mt=2&id=201671138&at=11l79Y&ct=nprdirectory",
"rss": "https://www.thisamericanlife.org/podcast/rss.xml"
}
},
"tinydeskradio": {
"id": "tinydeskradio",
"title": "Tiny Desk Radio",
"info": "We're bringing the best of Tiny Desk to the airwaves, only on public radio.",
"airtime": "SUN 8pm and SAT 9pm",
"imageSrc": "https://cdn.kqed.org/wp-content/uploads/2025/04/300x300-For-Member-Station-Logo-Tiny-Desk-Radio-@2x.png",
"officialWebsiteLink": "https://www.npr.org/series/g-s1-52030/tiny-desk-radio",
"meta": {
"site": "news",
"source": "npr"
},
"link": "/radio/program/tinydeskradio",
"subscribe": {
"rss": "https://feeds.npr.org/g-s1-52030/rss.xml"
}
},
"wait-wait-dont-tell-me": {
"id": "wait-wait-dont-tell-me",
"title": "Wait Wait... Don't Tell Me!",
"info": "Peter Sagal and Bill Kurtis host the weekly NPR News quiz show alongside some of the best and brightest news and entertainment personalities.",
"airtime": "SUN 10am-11am, SAT 11am-12pm, SAT 6pm-7pm",
"imageSrc": "https://cdn.kqed.org/wp-content/uploads/2024/04/Wait-Wait-Podcast-Tile-300x300-1.jpg",
"officialWebsiteLink": "https://www.npr.org/programs/wait-wait-dont-tell-me/",
"meta": {
"site": "news",
"source": "npr"
},
"link": "/radio/program/wait-wait-dont-tell-me",
"subscribe": {
"npr": "https://rpb3r.app.goo.gl/Xogv",
"apple": "https://itunes.apple.com/WebObjects/MZStore.woa/wa/viewPodcast?s=143441&mt=2&id=121493804&at=11l79Y&ct=nprdirectory",
"tuneIn": "https://tunein.com/radio/Wait-Wait-Dont-Tell-Me-p46/",
"rss": "https://feeds.npr.org/344098539/podcast.xml"
}
},
"weekend-edition-saturday": {
"id": "weekend-edition-saturday",
"title": "Weekend Edition Saturday",
"info": "Weekend Edition Saturday wraps up the week's news and offers a mix of analysis and features on a wide range of topics, including arts, sports, entertainment, and human interest stories. The two-hour program is hosted by NPR's Peabody Award-winning Scott Simon.",
"airtime": "SAT 5am-10am",
"imageSrc": "https://cdn.kqed.org/wp-content/uploads/2024/04/Weekend-Edition-Podcast-Tile-360x360-1.jpg",
"officialWebsiteLink": "https://www.npr.org/programs/weekend-edition-saturday/",
"meta": {
"site": "news",
"source": "npr"
},
"link": "/radio/program/weekend-edition-saturday"
},
"weekend-edition-sunday": {
"id": "weekend-edition-sunday",
"title": "Weekend Edition Sunday",
"info": "Weekend Edition Sunday features interviews with newsmakers, artists, scientists, politicians, musicians, writers, theologians and historians. The program has covered news events from Nelson Mandela's 1990 release from a South African prison to the capture of Saddam Hussein.",
"airtime": "SUN 5am-10am",
"imageSrc": "https://cdn.kqed.org/wp-content/uploads/2024/04/Weekend-Edition-Podcast-Tile-360x360-1.jpg",
"officialWebsiteLink": "https://www.npr.org/programs/weekend-edition-sunday/",
"meta": {
"site": "news",
"source": "npr"
},
"link": "/radio/program/weekend-edition-sunday"
}
},
"racesReducer": {},
"racesGenElectionReducer": {},
"radioSchedulesReducer": {},
"listsReducer": {
"posts/news?tag=chatgpt": {
"isFetching": false,
"latestQuery": {
"from": 0,
"postsToRender": 9
},
"tag": null,
"vitalsOnly": true,
"totalRequested": 9,
"isLoading": false,
"isLoadingMore": true,
"total": {
"value": 23,
"relation": "eq"
},
"items": [
"news_12066910",
"news_12066171",
"news_12063401",
"news_12060365",
"news_12059714",
"news_12058013",
"news_12057720",
"news_12054490",
"news_12052617"
]
}
},
"recallGuideReducer": {
"intros": {},
"policy": {},
"candidates": {}
},
"savedArticleReducer": {
"articles": [],
"status": {}
},
"pfsSessionReducer": {},
"subscriptionsReducer": {},
"termsReducer": {
"about": {
"name": "About",
"type": "terms",
"id": "about",
"slug": "about",
"link": "/about",
"taxonomy": "site"
},
"arts": {
"name": "Arts & Culture",
"grouping": [
"arts",
"pop",
"trulyca"
],
"description": "KQED Arts provides daily in-depth coverage of the Bay Area's music, art, film, performing arts, literature and arts news, as well as cultural commentary and criticism.",
"type": "terms",
"id": "arts",
"slug": "arts",
"link": "/arts",
"taxonomy": "site"
},
"artschool": {
"name": "Art School",
"parent": "arts",
"type": "terms",
"id": "artschool",
"slug": "artschool",
"link": "/artschool",
"taxonomy": "site"
},
"bayareabites": {
"name": "KQED food",
"grouping": [
"food",
"bayareabites",
"checkplease"
],
"parent": "food",
"type": "terms",
"id": "bayareabites",
"slug": "bayareabites",
"link": "/food",
"taxonomy": "site"
},
"bayareahiphop": {
"name": "Bay Area Hiphop",
"type": "terms",
"id": "bayareahiphop",
"slug": "bayareahiphop",
"link": "/bayareahiphop",
"taxonomy": "site"
},
"campaign21": {
"name": "Campaign 21",
"type": "terms",
"id": "campaign21",
"slug": "campaign21",
"link": "/campaign21",
"taxonomy": "site"
},
"checkplease": {
"name": "KQED food",
"grouping": [
"food",
"bayareabites",
"checkplease"
],
"parent": "food",
"type": "terms",
"id": "checkplease",
"slug": "checkplease",
"link": "/food",
"taxonomy": "site"
},
"education": {
"name": "Education",
"grouping": [
"education"
],
"type": "terms",
"id": "education",
"slug": "education",
"link": "/education",
"taxonomy": "site"
},
"elections": {
"name": "Elections",
"type": "terms",
"id": "elections",
"slug": "elections",
"link": "/elections",
"taxonomy": "site"
},
"events": {
"name": "Events",
"type": "terms",
"id": "events",
"slug": "events",
"link": "/events",
"taxonomy": "site"
},
"event": {
"name": "Event",
"alias": "events",
"type": "terms",
"id": "event",
"slug": "event",
"link": "/event",
"taxonomy": "site"
},
"filmschoolshorts": {
"name": "Film School Shorts",
"type": "terms",
"id": "filmschoolshorts",
"slug": "filmschoolshorts",
"link": "/filmschoolshorts",
"taxonomy": "site"
},
"food": {
"name": "KQED food",
"grouping": [
"food",
"bayareabites",
"checkplease"
],
"type": "terms",
"id": "food",
"slug": "food",
"link": "/food",
"taxonomy": "site"
},
"forum": {
"name": "Forum",
"relatedContentQuery": "posts/forum?",
"parent": "news",
"type": "terms",
"id": "forum",
"slug": "forum",
"link": "/forum",
"taxonomy": "site"
},
"futureofyou": {
"name": "Future of You",
"grouping": [
"science",
"futureofyou"
],
"parent": "science",
"type": "terms",
"id": "futureofyou",
"slug": "futureofyou",
"link": "/futureofyou",
"taxonomy": "site"
},
"jpepinheart": {
"name": "KQED food",
"relatedContentQuery": "posts/food,bayareabites,checkplease",
"parent": "food",
"type": "terms",
"id": "jpepinheart",
"slug": "jpepinheart",
"link": "/food",
"taxonomy": "site"
},
"liveblog": {
"name": "Live Blog",
"type": "terms",
"id": "liveblog",
"slug": "liveblog",
"link": "/liveblog",
"taxonomy": "site"
},
"livetv": {
"name": "Live TV",
"parent": "tv",
"type": "terms",
"id": "livetv",
"slug": "livetv",
"link": "/livetv",
"taxonomy": "site"
},
"lowdown": {
"name": "The Lowdown",
"relatedContentQuery": "posts/lowdown?",
"parent": "news",
"type": "terms",
"id": "lowdown",
"slug": "lowdown",
"link": "/lowdown",
"taxonomy": "site"
},
"mindshift": {
"name": "Mindshift",
"parent": "news",
"description": "MindShift explores the future of education by highlighting the innovative – and sometimes counterintuitive – ways educators and parents are helping all children succeed.",
"type": "terms",
"id": "mindshift",
"slug": "mindshift",
"link": "/mindshift",
"taxonomy": "site"
},
"news": {
"name": "News",
"grouping": [
"news",
"forum"
],
"type": "terms",
"id": "news",
"slug": "news",
"link": "/news",
"taxonomy": "site"
},
"perspectives": {
"name": "Perspectives",
"parent": "radio",
"type": "terms",
"id": "perspectives",
"slug": "perspectives",
"link": "/perspectives",
"taxonomy": "site"
},
"podcasts": {
"name": "Podcasts",
"type": "terms",
"id": "podcasts",
"slug": "podcasts",
"link": "/podcasts",
"taxonomy": "site"
},
"pop": {
"name": "Pop",
"parent": "arts",
"type": "terms",
"id": "pop",
"slug": "pop",
"link": "/pop",
"taxonomy": "site"
},
"pressroom": {
"name": "Pressroom",
"type": "terms",
"id": "pressroom",
"slug": "pressroom",
"link": "/pressroom",
"taxonomy": "site"
},
"quest": {
"name": "Quest",
"parent": "science",
"type": "terms",
"id": "quest",
"slug": "quest",
"link": "/quest",
"taxonomy": "site"
},
"radio": {
"name": "Radio",
"grouping": [
"forum",
"perspectives"
],
"description": "Listen to KQED Public Radio – home of Forum and The California Report – on 88.5 FM in San Francisco, 89.3 FM in Sacramento, 88.3 FM in Santa Rosa and 88.1 FM in Martinez.",
"type": "terms",
"id": "radio",
"slug": "radio",
"link": "/radio",
"taxonomy": "site"
},
"root": {
"name": "KQED",
"image": "https://ww2.kqed.org/app/uploads/2020/02/KQED-OG-Image@1x.png",
"imageWidth": 1200,
"imageHeight": 630,
"headData": {
"title": "KQED | News, Radio, Podcasts, TV | Public Media for Northern California",
"description": "KQED provides public radio, television, and independent reporting on issues that matter to the Bay Area. We’re the NPR and PBS member station for Northern California."
},
"type": "terms",
"id": "root",
"slug": "root",
"link": "/root",
"taxonomy": "site"
},
"science": {
"name": "Science",
"grouping": [
"science",
"futureofyou"
],
"description": "KQED Science brings you award-winning science and environment coverage from the Bay Area and beyond.",
"type": "terms",
"id": "science",
"slug": "science",
"link": "/science",
"taxonomy": "site"
},
"stateofhealth": {
"name": "State of Health",
"parent": "science",
"type": "terms",
"id": "stateofhealth",
"slug": "stateofhealth",
"link": "/stateofhealth",
"taxonomy": "site"
},
"support": {
"name": "Support",
"type": "terms",
"id": "support",
"slug": "support",
"link": "/support",
"taxonomy": "site"
},
"thedolist": {
"name": "The Do List",
"parent": "arts",
"type": "terms",
"id": "thedolist",
"slug": "thedolist",
"link": "/thedolist",
"taxonomy": "site"
},
"trulyca": {
"name": "Truly CA",
"grouping": [
"arts",
"pop",
"trulyca"
],
"parent": "arts",
"type": "terms",
"id": "trulyca",
"slug": "trulyca",
"link": "/trulyca",
"taxonomy": "site"
},
"tv": {
"name": "TV",
"type": "terms",
"id": "tv",
"slug": "tv",
"link": "/tv",
"taxonomy": "site"
},
"voterguide": {
"name": "Voter Guide",
"parent": "elections",
"alias": "elections",
"type": "terms",
"id": "voterguide",
"slug": "voterguide",
"link": "/voterguide",
"taxonomy": "site"
},
"guiaelectoral": {
"name": "Guia Electoral",
"parent": "elections",
"alias": "elections",
"type": "terms",
"id": "guiaelectoral",
"slug": "guiaelectoral",
"link": "/guiaelectoral",
"taxonomy": "site"
},
"news_32668": {
"type": "terms",
"id": "news_32668",
"meta": {
"index": "terms_1716263798",
"site": "news",
"id": "32668",
"found": true
},
"relationships": {},
"featImg": null,
"name": "ChatGPT",
"description": null,
"taxonomy": "tag",
"headData": {
"twImgId": null,
"twTitle": null,
"ogTitle": null,
"ogImgId": null,
"twDescription": null,
"description": null,
"title": "ChatGPT Archives | KQED News",
"ogDescription": null,
"imageData": {
"ogImageSize": {
"file": "https://cdn.kqed.org/wp-content/uploads/2020/02/KQED-OG-Image@1x.png",
"width": 1200,
"height": 630
},
"twImageSize": {
"file": "https://cdn.kqed.org/wp-content/uploads/2020/02/KQED-OG-Image@1x.png"
},
"twitterCard": "summary_large_image"
}
},
"ttid": 32685,
"slug": "chatgpt",
"isLoading": false,
"link": "/news/tag/chatgpt"
},
"source_news_12057720": {
"type": "terms",
"id": "source_news_12057720",
"meta": {
"override": true
},
"name": "The California Report",
"link": "https://www.kqed.org/news/tag/tcrarchive/",
"isLoading": false
},
"news_31795": {
"type": "terms",
"id": "news_31795",
"meta": {
"index": "terms_1716263798",
"site": "news",
"id": "31795",
"found": true
},
"relationships": {},
"featImg": null,
"name": "California",
"description": null,
"taxonomy": "category",
"headData": {
"twImgId": null,
"twTitle": null,
"ogTitle": null,
"ogImgId": null,
"twDescription": null,
"description": null,
"title": "California Archives | KQED News",
"ogDescription": null
},
"ttid": 31812,
"slug": "california",
"isLoading": false,
"link": "/news/category/california"
},
"news_8": {
"type": "terms",
"id": "news_8",
"meta": {
"index": "terms_1716263798",
"site": "news",
"id": "8",
"found": true
},
"relationships": {},
"featImg": null,
"name": "News",
"description": null,
"taxonomy": "category",
"headData": {
"twImgId": null,
"twTitle": null,
"ogTitle": null,
"ogImgId": null,
"twDescription": null,
"description": null,
"title": "News Archives | KQED News",
"ogDescription": null
},
"ttid": 8,
"slug": "news",
"isLoading": false,
"link": "/news/category/news"
},
"news_13": {
"type": "terms",
"id": "news_13",
"meta": {
"index": "terms_1716263798",
"site": "news",
"id": "13",
"found": true
},
"relationships": {},
"name": "Politics",
"slug": "politics",
"taxonomy": "category",
"description": null,
"featImg": null,
"headData": {
"title": "Politics | KQED News",
"description": null,
"ogTitle": null,
"ogDescription": null,
"ogImgId": null,
"twTitle": null,
"twDescription": null,
"twImgId": null
},
"ttid": 13,
"isLoading": false,
"link": "/news/category/politics"
},
"news_248": {
"type": "terms",
"id": "news_248",
"meta": {
"index": "terms_1716263798",
"site": "news",
"id": "248",
"found": true
},
"relationships": {},
"featImg": null,
"name": "Technology",
"description": null,
"taxonomy": "category",
"headData": {
"twImgId": null,
"twTitle": null,
"ogTitle": null,
"ogImgId": null,
"twDescription": null,
"description": null,
"title": "Technology Archives | KQED News",
"ogDescription": null
},
"ttid": 256,
"slug": "technology",
"isLoading": false,
"link": "/news/category/technology"
},
"news_25184": {
"type": "terms",
"id": "news_25184",
"meta": {
"index": "terms_1716263798",
"site": "news",
"id": "25184",
"found": true
},
"relationships": {},
"featImg": null,
"name": "AI",
"description": null,
"taxonomy": "tag",
"headData": {
"twImgId": null,
"twTitle": null,
"ogTitle": null,
"ogImgId": null,
"twDescription": null,
"description": null,
"title": "AI Archives | KQED News",
"ogDescription": null
},
"ttid": 25201,
"slug": "ai",
"isLoading": false,
"link": "/news/tag/ai"
},
"news_32664": {
"type": "terms",
"id": "news_32664",
"meta": {
"index": "terms_1716263798",
"site": "news",
"id": "32664",
"found": true
},
"relationships": {},
"name": "AI software",
"slug": "ai-software",
"taxonomy": "tag",
"description": null,
"featImg": null,
"headData": {
"title": "AI software | KQED News",
"description": null,
"ogTitle": null,
"ogDescription": null,
"ogImgId": null,
"twTitle": null,
"twDescription": null,
"twImgId": null
},
"ttid": 32681,
"isLoading": false,
"link": "/news/tag/ai-software"
},
"news_34755": {
"type": "terms",
"id": "news_34755",
"meta": {
"index": "terms_1716263798",
"site": "news",
"id": "34755",
"found": true
},
"relationships": {},
"name": "artificial intelligence",
"slug": "artificial-intelligence",
"taxonomy": "tag",
"description": null,
"featImg": null,
"headData": {
"title": "artificial intelligence | KQED News",
"description": null,
"ogTitle": null,
"ogDescription": null,
"ogImgId": null,
"twTitle": null,
"twDescription": null,
"twImgId": null
},
"ttid": 34772,
"isLoading": false,
"link": "/news/tag/artificial-intelligence"
},
"news_1386": {
"type": "terms",
"id": "news_1386",
"meta": {
"index": "terms_1716263798",
"site": "news",
"id": "1386",
"found": true
},
"relationships": {},
"featImg": null,
"name": "Bay Area",
"description": null,
"taxonomy": "tag",
"headData": {
"twImgId": null,
"twTitle": null,
"ogTitle": null,
"ogImgId": null,
"twDescription": null,
"description": null,
"title": "Bay Area Archives | KQED News",
"ogDescription": null
},
"ttid": 1398,
"slug": "bay-area",
"isLoading": false,
"link": "/news/tag/bay-area"
},
"news_18538": {
"type": "terms",
"id": "news_18538",
"meta": {
"index": "terms_1716263798",
"site": "news",
"id": "18538",
"found": true
},
"relationships": {},
"featImg": null,
"name": "California",
"description": null,
"taxonomy": "tag",
"headData": {
"twImgId": null,
"twTitle": null,
"ogTitle": null,
"ogImgId": null,
"twDescription": null,
"description": null,
"title": "California Archives | KQED News",
"ogDescription": null
},
"ttid": 31,
"slug": "california",
"isLoading": false,
"link": "/news/tag/california"
},
"news_1323": {
"type": "terms",
"id": "news_1323",
"meta": {
"index": "terms_1716263798",
"site": "news",
"id": "1323",
"found": true
},
"relationships": {},
"featImg": null,
"name": "Donald Trump",
"description": null,
"taxonomy": "tag",
"headData": {
"twImgId": null,
"twTitle": null,
"ogTitle": null,
"ogImgId": null,
"twDescription": null,
"description": null,
"title": "Donald Trump Archives | KQED News",
"ogDescription": null
},
"ttid": 1335,
"slug": "donald-trump",
"isLoading": false,
"link": "/news/tag/donald-trump"
},
"news_17968": {
"type": "terms",
"id": "news_17968",
"meta": {
"index": "terms_1716263798",
"site": "news",
"id": "17968",
"found": true
},
"relationships": {},
"name": "Politics",
"slug": "politics",
"taxonomy": "tag",
"description": null,
"featImg": null,
"headData": {
"title": "Politics | KQED News",
"description": null,
"ogTitle": null,
"ogDescription": null,
"ogImgId": null,
"twTitle": null,
"twDescription": null,
"twImgId": null
},
"ttid": 18002,
"isLoading": false,
"link": "/news/tag/politics"
},
"news_34586": {
"type": "terms",
"id": "news_34586",
"meta": {
"index": "terms_1716263798",
"site": "news",
"id": "34586",
"found": true
},
"relationships": {},
"name": "Silicon Valley",
"slug": "silicon-valley",
"taxonomy": "tag",
"description": null,
"featImg": null,
"headData": {
"title": "Silicon Valley | KQED News",
"description": null,
"ogTitle": null,
"ogDescription": null,
"ogImgId": null,
"twTitle": null,
"twDescription": null,
"twImgId": null
},
"ttid": 34603,
"isLoading": false,
"link": "/news/tag/silicon-valley"
},
"news_21285": {
"type": "terms",
"id": "news_21285",
"meta": {
"index": "terms_1716263798",
"site": "news",
"id": "21285",
"found": true
},
"relationships": {},
"featImg": null,
"name": "South Bay",
"description": null,
"taxonomy": "tag",
"headData": {
"twImgId": null,
"twTitle": null,
"ogTitle": null,
"ogImgId": null,
"twDescription": null,
"description": null,
"title": "South Bay Archives | KQED News",
"ogDescription": null
},
"ttid": 21302,
"slug": "south-bay",
"isLoading": false,
"link": "/news/tag/south-bay"
},
"news_1631": {
"type": "terms",
"id": "news_1631",
"meta": {
"index": "terms_1716263798",
"site": "news",
"id": "1631",
"found": true
},
"relationships": {},
"name": "Technology",
"slug": "technology",
"taxonomy": "tag",
"description": null,
"featImg": null,
"headData": {
"title": "Technology | KQED News",
"description": null,
"ogTitle": null,
"ogDescription": null,
"ogImgId": null,
"twTitle": null,
"twDescription": null,
"twImgId": null
},
"ttid": 1643,
"isLoading": false,
"link": "/news/tag/technology"
},
"news_33733": {
"type": "terms",
"id": "news_33733",
"meta": {
"index": "terms_1716263798",
"site": "news",
"id": "33733",
"found": true
},
"relationships": {},
"featImg": null,
"name": "News",
"description": null,
"taxonomy": "interest",
"headData": {
"twImgId": null,
"twTitle": null,
"ogTitle": null,
"ogImgId": null,
"twDescription": null,
"description": null,
"title": "News Archives | KQED News",
"ogDescription": null
},
"ttid": 33750,
"slug": "news",
"isLoading": false,
"link": "/news/interest/news"
},
"news_33731": {
"type": "terms",
"id": "news_33731",
"meta": {
"index": "terms_1716263798",
"site": "news",
"id": "33731",
"found": true
},
"relationships": {},
"featImg": null,
"name": "South Bay",
"description": null,
"taxonomy": "interest",
"headData": {
"twImgId": null,
"twTitle": null,
"ogTitle": null,
"ogImgId": null,
"twDescription": null,
"description": null,
"title": "South Bay Archives | KQED News",
"ogDescription": null
},
"ttid": 33748,
"slug": "south-bay",
"isLoading": false,
"link": "/news/interest/south-bay"
},
"news_33732": {
"type": "terms",
"id": "news_33732",
"meta": {
"index": "terms_1716263798",
"site": "news",
"id": "33732",
"found": true
},
"relationships": {},
"featImg": null,
"name": "Technology",
"description": null,
"taxonomy": "interest",
"headData": {
"twImgId": null,
"twTitle": null,
"ogTitle": null,
"ogImgId": null,
"twDescription": null,
"description": null,
"title": "Technology Archives | KQED News",
"ogDescription": null
},
"ttid": 33749,
"slug": "technology",
"isLoading": false,
"link": "/news/interest/technology"
},
"news_34167": {
"type": "terms",
"id": "news_34167",
"meta": {
"index": "terms_1716263798",
"site": "news",
"id": "34167",
"found": true
},
"relationships": {},
"name": "Criminal Justice",
"slug": "criminal-justice",
"taxonomy": "category",
"description": null,
"featImg": null,
"headData": {
"title": "Criminal Justice Archives | KQED News",
"description": null,
"ogTitle": null,
"ogDescription": null,
"ogImgId": null,
"twTitle": null,
"twDescription": null,
"twImgId": null
},
"ttid": 34184,
"isLoading": false,
"link": "/news/category/criminal-justice"
},
"news_6188": {
"type": "terms",
"id": "news_6188",
"meta": {
"index": "terms_1716263798",
"site": "news",
"id": "6188",
"found": true
},
"relationships": {},
"featImg": null,
"name": "Law and Justice",
"description": null,
"taxonomy": "category",
"headData": {
"twImgId": null,
"twTitle": null,
"ogTitle": null,
"ogImgId": null,
"twDescription": null,
"description": null,
"title": "Law and Justice Archives | KQED News",
"ogDescription": null
},
"ttid": 6212,
"slug": "law-and-justice",
"isLoading": false,
"link": "/news/category/law-and-justice"
},
"news_28250": {
"type": "terms",
"id": "news_28250",
"meta": {
"index": "terms_1716263798",
"site": "news",
"id": "28250",
"found": true
},
"relationships": {},
"featImg": null,
"name": "Local",
"description": null,
"taxonomy": "category",
"headData": {
"twImgId": null,
"twTitle": null,
"ogTitle": null,
"ogImgId": null,
"twDescription": null,
"description": null,
"title": "Local Archives | KQED News",
"ogDescription": null
},
"ttid": 28267,
"slug": "local",
"isLoading": false,
"link": "/news/category/local"
},
"news_19954": {
"type": "terms",
"id": "news_19954",
"meta": {
"index": "terms_1716263798",
"site": "news",
"id": "19954",
"found": true
},
"relationships": {},
"featImg": null,
"name": "Law and Justice",
"description": null,
"taxonomy": "tag",
"headData": {
"twImgId": null,
"twTitle": null,
"ogTitle": null,
"ogImgId": null,
"twDescription": null,
"description": null,
"title": "Law and Justice Archives | KQED News",
"ogDescription": null
},
"ttid": 19971,
"slug": "law-and-justice",
"isLoading": false,
"link": "/news/tag/law-and-justice"
},
"news_35758": {
"type": "terms",
"id": "news_35758",
"meta": {
"index": "terms_1716263798",
"site": "news",
"id": "35758",
"found": true
},
"relationships": {},
"name": "Open AI",
"slug": "open-ai",
"taxonomy": "tag",
"description": null,
"featImg": null,
"headData": {
"title": "Open AI | KQED News",
"description": null,
"ogTitle": null,
"ogDescription": null,
"ogImgId": null,
"twTitle": null,
"twDescription": null,
"twImgId": null
},
"ttid": 35775,
"isLoading": false,
"link": "/news/tag/open-ai"
},
"news_33542": {
"type": "terms",
"id": "news_33542",
"meta": {
"index": "terms_1716263798",
"site": "news",
"id": "33542",
"found": true
},
"relationships": {},
"featImg": null,
"name": "OpenAI",
"description": null,
"taxonomy": "tag",
"headData": {
"twImgId": null,
"twTitle": null,
"ogTitle": null,
"ogImgId": null,
"twDescription": null,
"description": null,
"title": "OpenAI Archives | KQED News",
"ogDescription": null
},
"ttid": 33559,
"slug": "openai",
"isLoading": false,
"link": "/news/tag/openai"
},
"news_38": {
"type": "terms",
"id": "news_38",
"meta": {
"index": "terms_1716263798",
"site": "news",
"id": "38",
"found": true
},
"relationships": {},
"featImg": null,
"name": "San Francisco",
"description": null,
"taxonomy": "tag",
"headData": {
"twImgId": null,
"twTitle": null,
"ogTitle": null,
"ogImgId": null,
"twDescription": null,
"description": null,
"title": "San Francisco Archives | KQED News",
"ogDescription": null
},
"ttid": 58,
"slug": "san-francisco",
"isLoading": false,
"link": "/news/tag/san-francisco"
},
"news_33729": {
"type": "terms",
"id": "news_33729",
"meta": {
"index": "terms_1716263798",
"site": "news",
"id": "33729",
"found": true
},
"relationships": {},
"featImg": null,
"name": "San Francisco",
"description": null,
"taxonomy": "interest",
"headData": {
"twImgId": null,
"twTitle": null,
"ogTitle": null,
"ogImgId": null,
"twDescription": null,
"description": null,
"title": "San Francisco Archives | KQED News",
"ogDescription": null
},
"ttid": 33746,
"slug": "san-francisco",
"isLoading": false,
"link": "/news/interest/san-francisco"
},
"news_22434": {
"type": "terms",
"id": "news_22434",
"meta": {
"index": "terms_1716263798",
"site": "news",
"id": "22434",
"found": true
},
"relationships": {},
"featImg": null,
"name": "death",
"description": null,
"taxonomy": "tag",
"headData": {
"twImgId": null,
"twTitle": null,
"ogTitle": null,
"ogImgId": null,
"twDescription": null,
"description": null,
"title": "death Archives | KQED News",
"ogDescription": null
},
"ttid": 22451,
"slug": "death",
"isLoading": false,
"link": "/news/tag/death"
},
"news_23333": {
"type": "terms",
"id": "news_23333",
"meta": {
"index": "terms_1716263798",
"site": "news",
"id": "23333",
"found": true
},
"relationships": {},
"featImg": null,
"name": "families",
"description": null,
"taxonomy": "tag",
"headData": {
"twImgId": null,
"twTitle": null,
"ogTitle": null,
"ogImgId": null,
"twDescription": null,
"description": null,
"title": "families Archives | KQED News",
"ogDescription": null
},
"ttid": 23350,
"slug": "families",
"isLoading": false,
"link": "/news/tag/families"
},
"news_18543": {
"type": "terms",
"id": "news_18543",
"meta": {
"index": "terms_1716263798",
"site": "news",
"id": "18543",
"found": true
},
"relationships": {},
"featImg": null,
"name": "Health",
"description": null,
"taxonomy": "tag",
"headData": {
"twImgId": null,
"twTitle": null,
"ogTitle": null,
"ogImgId": null,
"twDescription": null,
"description": null,
"title": "Health Archives | KQED News",
"ogDescription": null
},
"ttid": 466,
"slug": "health",
"isLoading": false,
"link": "/news/tag/health"
},
"news_21891": {
"type": "terms",
"id": "news_21891",
"meta": {
"index": "terms_1716263798",
"site": "news",
"id": "21891",
"found": true
},
"relationships": {},
"featImg": null,
"name": "lawsuits",
"description": null,
"taxonomy": "tag",
"headData": {
"twImgId": null,
"twTitle": null,
"ogTitle": null,
"ogImgId": null,
"twDescription": null,
"description": null,
"title": "lawsuits Archives | KQED News",
"ogDescription": null
},
"ttid": 21908,
"slug": "lawsuits",
"isLoading": false,
"link": "/news/tag/lawsuits"
},
"news_2109": {
"type": "terms",
"id": "news_2109",
"meta": {
"index": "terms_1716263798",
"site": "news",
"id": "2109",
"found": true
},
"relationships": {},
"featImg": null,
"name": "mental health",
"description": null,
"taxonomy": "tag",
"headData": {
"twImgId": null,
"twTitle": null,
"ogTitle": null,
"ogImgId": null,
"twDescription": null,
"description": null,
"title": "mental health Archives | KQED News",
"ogDescription": null
},
"ttid": 2124,
"slug": "mental-health",
"isLoading": false,
"link": "/news/tag/mental-health"
},
"news_33543": {
"type": "terms",
"id": "news_33543",
"meta": {
"index": "terms_1716263798",
"site": "news",
"id": "33543",
"found": true
},
"relationships": {},
"name": "Sam Altman",
"slug": "sam-altman",
"taxonomy": "tag",
"description": null,
"featImg": null,
"headData": {
"title": "Sam Altman | KQED News",
"description": null,
"ogTitle": null,
"ogDescription": null,
"ogImgId": null,
"twTitle": null,
"twDescription": null,
"twImgId": null,
"metaRobotsNoIndex": "noindex"
},
"ttid": 33560,
"isLoading": false,
"link": "/news/tag/sam-altman"
},
"news_2883": {
"type": "terms",
"id": "news_2883",
"meta": {
"index": "terms_1716263798",
"site": "news",
"id": "2883",
"found": true
},
"relationships": {},
"featImg": null,
"name": "suicide",
"description": null,
"taxonomy": "tag",
"headData": {
"twImgId": null,
"twTitle": null,
"ogTitle": null,
"ogImgId": null,
"twDescription": null,
"description": null,
"title": "suicide Archives | KQED News",
"ogDescription": null
},
"ttid": 2901,
"slug": "suicide",
"isLoading": false,
"link": "/news/tag/suicide"
},
"news_21121": {
"type": "terms",
"id": "news_21121",
"meta": {
"index": "terms_1716263798",
"site": "news",
"id": "21121",
"found": true
},
"relationships": {},
"featImg": null,
"name": "Teenagers",
"description": null,
"taxonomy": "tag",
"headData": {
"twImgId": null,
"twTitle": null,
"ogTitle": null,
"ogImgId": null,
"twDescription": null,
"description": null,
"title": "Teenagers Archives | KQED News",
"ogDescription": null
},
"ttid": 21138,
"slug": "teenagers",
"isLoading": false,
"link": "/news/tag/teenagers"
},
"news_20385": {
"type": "terms",
"id": "news_20385",
"meta": {
"index": "terms_1716263798",
"site": "news",
"id": "20385",
"found": true
},
"relationships": {},
"featImg": null,
"name": "teens",
"description": null,
"taxonomy": "tag",
"headData": {
"twImgId": null,
"twTitle": null,
"ogTitle": null,
"ogImgId": null,
"twDescription": null,
"description": null,
"title": "teens Archives | KQED News",
"ogDescription": null
},
"ttid": 20402,
"slug": "teens",
"isLoading": false,
"link": "/news/tag/teens"
},
"news_33747": {
"type": "terms",
"id": "news_33747",
"meta": {
"index": "terms_1716263798",
"site": "news",
"id": "33747",
"found": true
},
"relationships": {},
"featImg": null,
"name": "Health",
"description": null,
"taxonomy": "interest",
"headData": {
"twImgId": null,
"twTitle": null,
"ogTitle": null,
"ogImgId": null,
"twDescription": null,
"description": null,
"title": "Health Archives | KQED News",
"ogDescription": null
},
"ttid": 33764,
"slug": "health",
"isLoading": false,
"link": "/news/interest/health"
},
"news_29886": {
"type": "terms",
"id": "news_29886",
"meta": {
"index": "terms_1716263798",
"site": "news",
"id": "29886",
"found": true
},
"relationships": {},
"featImg": null,
"name": "children's health",
"description": null,
"taxonomy": "tag",
"headData": {
"twImgId": null,
"twTitle": null,
"ogTitle": null,
"ogImgId": null,
"twDescription": null,
"description": null,
"title": "children's health Archives | KQED News",
"ogDescription": null
},
"ttid": 29903,
"slug": "childrens-health",
"isLoading": false,
"link": "/news/tag/childrens-health"
},
"news_22456": {
"type": "terms",
"id": "news_22456",
"meta": {
"index": "terms_1716263798",
"site": "news",
"id": "22456",
"found": true
},
"relationships": {},
"featImg": null,
"name": "public safety",
"description": null,
"taxonomy": "tag",
"headData": {
"twImgId": null,
"twTitle": null,
"ogTitle": null,
"ogImgId": null,
"twDescription": null,
"description": null,
"title": "public safety Archives | KQED News",
"ogDescription": null
},
"ttid": 22473,
"slug": "public-safety",
"isLoading": false,
"link": "/news/tag/public-safety"
},
"news_22307": {
"type": "terms",
"id": "news_22307",
"meta": {
"index": "terms_1716263798",
"site": "news",
"id": "22307",
"found": true
},
"relationships": {},
"featImg": null,
"name": "california laws",
"description": null,
"taxonomy": "tag",
"headData": {
"twImgId": null,
"twTitle": null,
"ogTitle": null,
"ogImgId": null,
"twDescription": null,
"description": null,
"title": "california laws Archives | KQED News",
"ogDescription": null
},
"ttid": 22324,
"slug": "california-laws",
"isLoading": false,
"link": "/news/tag/california-laws"
},
"news_30826": {
"type": "terms",
"id": "news_30826",
"meta": {
"index": "terms_1716263798",
"site": "news",
"id": "30826",
"found": true
},
"relationships": {},
"featImg": null,
"name": "children's mental health",
"description": null,
"taxonomy": "tag",
"headData": {
"twImgId": null,
"twTitle": null,
"ogTitle": null,
"ogImgId": null,
"twDescription": null,
"description": null,
"title": "children's mental health Archives | KQED News",
"ogDescription": null
},
"ttid": 30843,
"slug": "childrens-mental-health",
"isLoading": false,
"link": "/news/tag/childrens-mental-health"
},
"news_16": {
"type": "terms",
"id": "news_16",
"meta": {
"index": "terms_1716263798",
"site": "news",
"id": "16",
"found": true
},
"relationships": {},
"featImg": null,
"name": "Gavin Newsom",
"description": null,
"taxonomy": "tag",
"headData": {
"twImgId": null,
"twTitle": null,
"ogTitle": null,
"ogImgId": null,
"twDescription": null,
"description": null,
"title": "Gavin Newsom Archives | KQED News",
"ogDescription": null
},
"ttid": 16,
"slug": "gavin-newsom",
"isLoading": false,
"link": "/news/tag/gavin-newsom"
},
"news_34532": {
"type": "terms",
"id": "news_34532",
"meta": {
"index": "terms_1716263798",
"site": "news",
"id": "34532",
"found": true
},
"relationships": {},
"name": "new bills",
"slug": "new-bills",
"taxonomy": "tag",
"description": null,
"featImg": null,
"headData": {
"title": "new bills | KQED News",
"description": null,
"ogTitle": null,
"ogDescription": null,
"ogImgId": null,
"twTitle": null,
"twDescription": null,
"twImgId": null,
"metaRobotsNoIndex": "noindex"
},
"ttid": 34549,
"isLoading": false,
"link": "/news/tag/new-bills"
},
"news_27626": {
"type": "terms",
"id": "news_27626",
"meta": {
"index": "terms_1716263798",
"site": "news",
"id": "27626",
"found": true
},
"relationships": {},
"featImg": null,
"name": "featured-news",
"description": null,
"taxonomy": "tag",
"headData": {
"twImgId": null,
"twTitle": null,
"ogTitle": null,
"ogImgId": null,
"twDescription": null,
"description": null,
"title": "featured-news Archives | KQED News",
"ogDescription": null
},
"ttid": 27643,
"slug": "featured-news",
"isLoading": false,
"link": "/news/tag/featured-news"
},
"news_33738": {
"type": "terms",
"id": "news_33738",
"meta": {
"index": "terms_1716263798",
"site": "news",
"id": "33738",
"found": true
},
"relationships": {},
"featImg": null,
"name": "California",
"description": null,
"taxonomy": "interest",
"headData": {
"twImgId": null,
"twTitle": null,
"ogTitle": null,
"ogImgId": null,
"twDescription": null,
"description": null,
"title": "California Archives | KQED News",
"ogDescription": null
},
"ttid": 33755,
"slug": "california",
"isLoading": false,
"link": "/news/interest/california"
},
"news_72": {
"type": "terms",
"id": "news_72",
"meta": {
"index": "terms_1716263798",
"site": "news",
"id": "72",
"found": true
},
"relationships": {},
"featImg": "https://ww2.kqed.org/app/uploads/sites/10/2014/10/TCR-2-Logo-Web-Banners-03.png",
"name": "The California Report",
"description": null,
"taxonomy": "program",
"headData": {
"twImgId": null,
"twTitle": null,
"ogTitle": null,
"ogImgId": null,
"twDescription": null,
"description": null,
"title": "The California Report Archives | KQED News",
"ogDescription": null
},
"ttid": 6969,
"slug": "the-california-report",
"isLoading": false,
"link": "/news/program/the-california-report"
},
"news_33520": {
"type": "terms",
"id": "news_33520",
"meta": {
"index": "terms_1716263798",
"site": "news",
"id": "33520",
"found": true
},
"relationships": {},
"featImg": null,
"name": "Podcast",
"description": null,
"taxonomy": "category",
"headData": {
"twImgId": null,
"twTitle": null,
"ogTitle": null,
"ogImgId": null,
"twDescription": null,
"description": null,
"title": "Podcast Archives | KQED News",
"ogDescription": null
},
"ttid": 33537,
"slug": "podcast",
"isLoading": false,
"link": "/news/category/podcast"
},
"news_34018": {
"type": "terms",
"id": "news_34018",
"meta": {
"index": "terms_1716263798",
"site": "news",
"id": "34018",
"found": true
},
"relationships": {},
"featImg": null,
"name": "tcr",
"description": null,
"taxonomy": "category",
"headData": {
"twImgId": null,
"twTitle": null,
"ogTitle": null,
"ogImgId": null,
"twDescription": null,
"description": null,
"title": "tcr Archives | KQED News",
"ogDescription": null
},
"ttid": 34035,
"slug": "tcr",
"isLoading": false,
"link": "/news/category/tcr"
},
"news_35915": {
"type": "terms",
"id": "news_35915",
"meta": {
"index": "terms_1716263798",
"site": "news",
"id": "35915",
"found": true
},
"relationships": {},
"name": "exosphere",
"slug": "exosphere",
"taxonomy": "tag",
"description": null,
"featImg": null,
"headData": {
"title": "exosphere | KQED News",
"description": null,
"ogTitle": null,
"ogDescription": null,
"ogImgId": null,
"twTitle": null,
"twDescription": null,
"twImgId": null
},
"ttid": 35932,
"isLoading": false,
"link": "/news/tag/exosphere"
},
"news_35910": {
"type": "terms",
"id": "news_35910",
"meta": {
"index": "terms_1716263798",
"site": "news",
"id": "35910",
"found": true
},
"relationships": {},
"name": "immigration agents",
"slug": "immigration-agents",
"taxonomy": "tag",
"description": null,
"featImg": null,
"headData": {
"title": "immigration agents | KQED News",
"description": null,
"ogTitle": null,
"ogDescription": null,
"ogImgId": null,
"twTitle": null,
"twDescription": null,
"twImgId": null
},
"ttid": 35927,
"isLoading": false,
"link": "/news/tag/immigration-agents"
},
"news_35916": {
"type": "terms",
"id": "news_35916",
"meta": {
"index": "terms_1716263798",
"site": "news",
"id": "35916",
"found": true
},
"relationships": {},
"name": "LA wildfires",
"slug": "la-wildfires",
"taxonomy": "tag",
"description": null,
"featImg": null,
"headData": {
"title": "LA wildfires | KQED News",
"description": null,
"ogTitle": null,
"ogDescription": null,
"ogImgId": null,
"twTitle": null,
"twDescription": null,
"twImgId": null
},
"ttid": 35933,
"isLoading": false,
"link": "/news/tag/la-wildfires"
},
"news_34761": {
"type": "terms",
"id": "news_34761",
"meta": {
"index": "terms_1716263798",
"site": "news",
"id": "34761",
"found": true
},
"relationships": {},
"name": "pomona",
"slug": "pomona",
"taxonomy": "tag",
"description": null,
"featImg": null,
"headData": {
"title": "pomona | KQED News",
"description": null,
"ogTitle": null,
"ogDescription": null,
"ogImgId": null,
"twTitle": null,
"twDescription": null,
"twImgId": null,
"metaRobotsNoIndex": "noindex"
},
"ttid": 34778,
"isLoading": false,
"link": "/news/tag/pomona"
},
"news_35914": {
"type": "terms",
"id": "news_35914",
"meta": {
"index": "terms_1716263798",
"site": "news",
"id": "35914",
"found": true
},
"relationships": {},
"name": "SpaceX Falcon 9",
"slug": "spacex-falcon-9",
"taxonomy": "tag",
"description": null,
"featImg": null,
"headData": {
"title": "SpaceX Falcon 9 | KQED News",
"description": null,
"ogTitle": null,
"ogDescription": null,
"ogImgId": null,
"twTitle": null,
"twDescription": null,
"twImgId": null
},
"ttid": 35931,
"isLoading": false,
"link": "/news/tag/spacex-falcon-9"
},
"news_21998": {
"type": "terms",
"id": "news_21998",
"meta": {
"index": "terms_1716263798",
"site": "news",
"id": "21998",
"found": true
},
"relationships": {},
"featImg": null,
"name": "TCRAM",
"description": null,
"taxonomy": "tag",
"headData": {
"twImgId": null,
"twTitle": null,
"ogTitle": null,
"ogImgId": null,
"twDescription": null,
"description": null,
"title": "TCRAM Archives | KQED News",
"ogDescription": null
},
"ttid": 22015,
"slug": "tcram",
"isLoading": false,
"link": "/news/tag/tcram"
},
"news_21268": {
"type": "terms",
"id": "news_21268",
"meta": {
"index": "terms_1716263798",
"site": "news",
"id": "21268",
"found": true
},
"relationships": {},
"featImg": null,
"name": "tcrarchive",
"description": null,
"taxonomy": "tag",
"headData": {
"twImgId": null,
"twTitle": null,
"ogTitle": null,
"ogImgId": null,
"twDescription": null,
"description": null,
"title": "tcrarchive Archives | KQED News",
"ogDescription": null
},
"ttid": 21285,
"slug": "tcrarchive",
"isLoading": false,
"link": "/news/tag/tcrarchive"
},
"news_689": {
"type": "terms",
"id": "news_689",
"meta": {
"index": "terms_1716263798",
"site": "news",
"id": "689",
"found": true
},
"relationships": {},
"featImg": null,
"name": "Parenting",
"description": null,
"taxonomy": "tag",
"headData": {
"twImgId": null,
"twTitle": null,
"ogTitle": null,
"ogImgId": null,
"twDescription": null,
"description": null,
"title": "Parenting Archives | KQED News",
"ogDescription": null
},
"ttid": 698,
"slug": "parenting",
"isLoading": false,
"link": "/news/tag/parenting"
}
},
"userAgentReducer": {
"userAgent": "Mozilla/5.0 AppleWebKit/537.36 (KHTML, like Gecko; compatible; ClaudeBot/1.0; +claudebot@anthropic.com)",
"isBot": true
},
"userPermissionsReducer": {
"wpLoggedIn": false
},
"localStorageReducer": {},
"browserHistoryReducer": [],
"eventsReducer": {},
"fssReducer": {},
"tvDailyScheduleReducer": {},
"tvWeeklyScheduleReducer": {},
"tvPrimetimeScheduleReducer": {},
"tvMonthlyScheduleReducer": {},
"userAccountReducer": {
"user": {
"email": null,
"emailStatus": "EMAIL_UNVALIDATED",
"loggedStatus": "LOGGED_OUT",
"loggingChecked": false,
"articles": [],
"firstName": null,
"lastName": null,
"phoneNumber": null,
"fetchingMembership": false,
"membershipError": false,
"memberships": [
{
"id": null,
"startDate": null,
"firstName": null,
"lastName": null,
"familyNumber": null,
"memberNumber": null,
"memberSince": null,
"expirationDate": null,
"pfsEligible": false,
"isSustaining": false,
"membershipLevel": "Prospect",
"membershipStatus": "Non Member",
"lastGiftDate": null,
"renewalDate": null,
"lastDonationAmount": null
}
]
},
"authModal": {
"isOpen": false,
"view": "LANDING_VIEW"
},
"error": null
},
"youthMediaReducer": {},
"checkPleaseReducer": {
"filterData": {},
"restaurantData": []
},
"location": {
"pathname": "/news/tag/chatgpt",
"previousPathname": "/"
}
}