// Marks this page as server-side rendered so the client bundle knows to
// hydrate existing markup (and read window.__INITIAL_STATE__) instead of
// rendering from scratch.
window.__IS_SSR__ = true;
window.__INITIAL_STATE__={
"attachmentsReducer": {
"audio_0": {
"type": "attachments",
"id": "audio_0",
"imgSizes": {
"kqedFullSize": {
"file": "https://ww2.kqed.org/news/wp-content/themes/KQED-unified/img/audio_bgs/background0.jpg"
}
}
},
"audio_1": {
"type": "attachments",
"id": "audio_1",
"imgSizes": {
"kqedFullSize": {
"file": "https://ww2.kqed.org/news/wp-content/themes/KQED-unified/img/audio_bgs/background1.jpg"
}
}
},
"audio_2": {
"type": "attachments",
"id": "audio_2",
"imgSizes": {
"kqedFullSize": {
"file": "https://ww2.kqed.org/news/wp-content/themes/KQED-unified/img/audio_bgs/background2.jpg"
}
}
},
"audio_3": {
"type": "attachments",
"id": "audio_3",
"imgSizes": {
"kqedFullSize": {
"file": "https://ww2.kqed.org/news/wp-content/themes/KQED-unified/img/audio_bgs/background3.jpg"
}
}
},
"audio_4": {
"type": "attachments",
"id": "audio_4",
"imgSizes": {
"kqedFullSize": {
"file": "https://ww2.kqed.org/news/wp-content/themes/KQED-unified/img/audio_bgs/background4.jpg"
}
}
},
"placeholder": {
"type": "attachments",
"id": "placeholder",
"imgSizes": {
"thumbnail": {
"file": "https://cdn.kqed.org/wp-content/uploads/2024/12/KQED-Default-Image-816638274-2000x1333-1-160x107.jpg",
"width": 160,
"height": 107,
"mimeType": "image/jpeg"
},
"medium": {
"file": "https://cdn.kqed.org/wp-content/uploads/2024/12/KQED-Default-Image-816638274-2000x1333-1-800x533.jpg",
"width": 800,
"height": 533,
"mimeType": "image/jpeg"
},
"medium_large": {
"file": "https://cdn.kqed.org/wp-content/uploads/2024/12/KQED-Default-Image-816638274-2000x1333-1-768x512.jpg",
"width": 768,
"height": 512,
"mimeType": "image/jpeg"
},
"large": {
"file": "https://cdn.kqed.org/wp-content/uploads/2024/12/KQED-Default-Image-816638274-2000x1333-1-1020x680.jpg",
"width": 1020,
"height": 680,
"mimeType": "image/jpeg"
},
"1536x1536": {
"file": "https://cdn.kqed.org/wp-content/uploads/2024/12/KQED-Default-Image-816638274-2000x1333-1-1536x1024.jpg",
"width": 1536,
"height": 1024,
"mimeType": "image/jpeg"
},
"fd-lrg": {
"file": "https://cdn.kqed.org/wp-content/uploads/2024/12/KQED-Default-Image-816638274-2000x1333-1-1536x1024.jpg",
"width": 1536,
"height": 1024,
"mimeType": "image/jpeg"
},
"fd-med": {
"file": "https://cdn.kqed.org/wp-content/uploads/2024/12/KQED-Default-Image-816638274-2000x1333-1-1020x680.jpg",
"width": 1020,
"height": 680,
"mimeType": "image/jpeg"
},
"fd-sm": {
"file": "https://cdn.kqed.org/wp-content/uploads/2024/12/KQED-Default-Image-816638274-2000x1333-1-800x533.jpg",
"width": 800,
"height": 533,
"mimeType": "image/jpeg"
},
"post-thumbnail": {
"file": "https://cdn.kqed.org/wp-content/uploads/2024/12/KQED-Default-Image-816638274-2000x1333-1-672x372.jpg",
"width": 672,
"height": 372,
"mimeType": "image/jpeg"
},
"twentyfourteen-full-width": {
"file": "https://cdn.kqed.org/wp-content/uploads/2024/12/KQED-Default-Image-816638274-2000x1333-1-1038x576.jpg",
"width": 1038,
"height": 576,
"mimeType": "image/jpeg"
},
"xxsmall": {
"file": "https://cdn.kqed.org/wp-content/uploads/2024/12/KQED-Default-Image-816638274-2000x1333-1-160x107.jpg",
"width": 160,
"height": 107,
"mimeType": "image/jpeg"
},
"xsmall": {
"file": "https://cdn.kqed.org/wp-content/uploads/2024/12/KQED-Default-Image-816638274-2000x1333-1-672x372.jpg",
"width": 672,
"height": 372,
"mimeType": "image/jpeg"
},
"small": {
"file": "https://cdn.kqed.org/wp-content/uploads/2024/12/KQED-Default-Image-816638274-2000x1333-1-672x372.jpg",
"width": 672,
"height": 372,
"mimeType": "image/jpeg"
},
"xlarge": {
"file": "https://cdn.kqed.org/wp-content/uploads/2024/12/KQED-Default-Image-816638274-2000x1333-1-1020x680.jpg",
"width": 1020,
"height": 680,
"mimeType": "image/jpeg"
},
"full-width": {
"file": "https://cdn.kqed.org/wp-content/uploads/2024/12/KQED-Default-Image-816638274-2000x1333-1-1920x1280.jpg",
"width": 1920,
"height": 1280,
"mimeType": "image/jpeg"
},
"guest-author-32": {
"file": "https://cdn.kqed.org/wp-content/uploads/2025/01/KQED-Default-Image-816638274-1333x1333-1-160x160.jpg",
"width": 32,
"height": 32,
"mimeType": "image/jpeg"
},
"guest-author-50": {
"file": "https://cdn.kqed.org/wp-content/uploads/2025/01/KQED-Default-Image-816638274-1333x1333-1-160x160.jpg",
"width": 50,
"height": 50,
"mimeType": "image/jpeg"
},
"guest-author-64": {
"file": "https://cdn.kqed.org/wp-content/uploads/2025/01/KQED-Default-Image-816638274-1333x1333-1-160x160.jpg",
"width": 64,
"height": 64,
"mimeType": "image/jpeg"
},
"guest-author-96": {
"file": "https://cdn.kqed.org/wp-content/uploads/2025/01/KQED-Default-Image-816638274-1333x1333-1-160x160.jpg",
"width": 96,
"height": 96,
"mimeType": "image/jpeg"
},
"guest-author-128": {
"file": "https://cdn.kqed.org/wp-content/uploads/2025/01/KQED-Default-Image-816638274-1333x1333-1-160x160.jpg",
"width": 128,
"height": 128,
"mimeType": "image/jpeg"
},
"detail": {
"file": "https://cdn.kqed.org/wp-content/uploads/2025/01/KQED-Default-Image-816638274-1333x1333-1-160x160.jpg",
"width": 160,
"height": 160,
"mimeType": "image/jpeg"
},
"kqedFullSize": {
"file": "https://cdn.kqed.org/wp-content/uploads/2024/12/KQED-Default-Image-816638274-2000x1333-1.jpg",
"width": 2000,
"height": 1333
}
}
},
"news_11999342": {
"type": "attachments",
"id": "news_11999342",
"meta": {
"index": "attachments_1716263798",
"site": "news",
"id": "11999342",
"found": true
},
"title": "2024 Republican National Convention TW",
"publishDate": 1723074225,
"status": "inherit",
"parent": 11999338,
"modified": 1764708406,
"caption": "David Sacks, a venture capitalist, and Vice President JD Vance are seen in Fiserv Forum on the first day of Republican National Convention in Milwaukee, Wis., on Monday, July 15, 2024.",
"credit": "Tom Williams/Getty Images",
"altTag": null,
"description": null,
"imgSizes": {
"medium": {
"file": "https://cdn.kqed.org/wp-content/uploads/sites/10/2024/08/GettyImages-2163834989-800x533.jpg",
"width": 800,
"height": 533,
"mimeType": "image/jpeg"
},
"large": {
"file": "https://cdn.kqed.org/wp-content/uploads/sites/10/2024/08/GettyImages-2163834989-1020x680.jpg",
"width": 1020,
"height": 680,
"mimeType": "image/jpeg"
},
"thumbnail": {
"file": "https://cdn.kqed.org/wp-content/uploads/sites/10/2024/08/GettyImages-2163834989-160x107.jpg",
"width": 160,
"height": 107,
"mimeType": "image/jpeg"
},
"1536x1536": {
"file": "https://cdn.kqed.org/wp-content/uploads/sites/10/2024/08/GettyImages-2163834989-1536x1024.jpg",
"width": 1536,
"height": 1024,
"mimeType": "image/jpeg"
},
"2048x2048": {
"file": "https://cdn.kqed.org/wp-content/uploads/sites/10/2024/08/GettyImages-2163834989-2048x1365.jpg",
"width": 2048,
"height": 1365,
"mimeType": "image/jpeg"
},
"post-thumbnail": {
"file": "https://cdn.kqed.org/wp-content/uploads/sites/10/2024/08/GettyImages-2163834989-672x372.jpg",
"width": 672,
"height": 372,
"mimeType": "image/jpeg"
},
"twentyfourteen-full-width": {
"file": "https://cdn.kqed.org/wp-content/uploads/sites/10/2024/08/GettyImages-2163834989-1038x576.jpg",
"width": 1038,
"height": 576,
"mimeType": "image/jpeg"
},
"full-width": {
"file": "https://cdn.kqed.org/wp-content/uploads/sites/10/2024/08/GettyImages-2163834989-1920x1280.jpg",
"width": 1920,
"height": 1280,
"mimeType": "image/jpeg"
},
"kqedFullSize": {
"file": "https://cdn.kqed.org/wp-content/uploads/sites/10/2024/08/GettyImages-2163834989-scaled.jpg",
"width": 2560,
"height": 1707
}
},
"fetchFailed": false,
"isLoading": false
},
"news_12063588": {
"type": "attachments",
"id": "news_12063588",
"meta": {
"index": "attachments_1716263798",
"site": "news",
"id": "12063588",
"found": true
},
"title": "Data Centers Proliferate And Cause Controversy",
"publishDate": 1762781567,
"status": "inherit",
"parent": 12063587,
"modified": 1762781598,
"caption": "VERNON, CALIFORNIA - OCTOBER 20: An aerial view of a 33 megawatt data center with closed-loop cooling system on October 20, 2025 in Vernon, California. A surge in demand for AI infrastructure is fueling a boom in data centers across the country and around the globe. ",
"credit": "Photo by Mario Tama/Getty Images",
"altTag": null,
"description": null,
"imgSizes": {
"medium": {
"file": "https://cdn.kqed.org/wp-content/uploads/sites/10/2025/11/GettyImages-2242294580-2000x1392.jpg",
"width": 2000,
"height": 1392,
"mimeType": "image/jpeg"
},
"large": {
"file": "https://cdn.kqed.org/wp-content/uploads/sites/10/2025/11/GettyImages-2242294580-2000x1392.jpg",
"width": 2000,
"height": 1392,
"mimeType": "image/jpeg"
},
"thumbnail": {
"file": "https://cdn.kqed.org/wp-content/uploads/sites/10/2025/11/GettyImages-2242294580-160x111.jpg",
"width": 160,
"height": 111,
"mimeType": "image/jpeg"
},
"1536x1536": {
"file": "https://cdn.kqed.org/wp-content/uploads/sites/10/2025/11/GettyImages-2242294580-1536x1069.jpg",
"width": 1536,
"height": 1069,
"mimeType": "image/jpeg"
},
"2048x2048": {
"file": "https://cdn.kqed.org/wp-content/uploads/sites/10/2025/11/GettyImages-2242294580-2048x1425.jpg",
"width": 2048,
"height": 1425,
"mimeType": "image/jpeg"
},
"post-thumbnail": {
"file": "https://cdn.kqed.org/wp-content/uploads/sites/10/2025/11/GettyImages-2242294580-672x372.jpg",
"width": 672,
"height": 372,
"mimeType": "image/jpeg"
},
"twentyfourteen-full-width": {
"file": "https://cdn.kqed.org/wp-content/uploads/sites/10/2025/11/GettyImages-2242294580-1038x576.jpg",
"width": 1038,
"height": 576,
"mimeType": "image/jpeg"
},
"full-width": {
"file": "https://cdn.kqed.org/wp-content/uploads/sites/10/2025/11/GettyImages-2242294580-2000x1392.jpg",
"width": 2000,
"height": 1392,
"mimeType": "image/jpeg"
},
"npr-cds-wide": {
"file": "https://cdn.kqed.org/wp-content/uploads/sites/10/2025/11/GettyImages-2242294580-1200x675.jpg",
"width": 1200,
"height": 675,
"mimeType": "image/jpeg"
},
"kqedFullSize": {
"file": "https://cdn.kqed.org/wp-content/uploads/sites/10/2025/11/GettyImages-2242294580-scaled.jpg",
"width": 2560,
"height": 1782
}
},
"fetchFailed": false,
"isLoading": false
},
"news_12061541": {
"type": "attachments",
"id": "news_12061541",
"meta": {
"index": "attachments_1716263798",
"site": "news",
"id": "12061541",
"found": true
},
"title": "AIPolice",
"publishDate": 1761335653,
"status": "inherit",
"parent": 12061462,
"modified": 1761335688,
"caption": "Fresno Police Officer Gregory Colon-Reyes demonstrates Draft One on Sept. 24, 2024, an AI tool that uses the audio from bodycam footage to generate police reports.",
"credit": "Courtesy of Gary Kazanjian",
"altTag": null,
"description": null,
"imgSizes": {
"thumbnail": {
"file": "https://cdn.kqed.org/wp-content/uploads/sites/10/2025/10/AIPolice-160x114.jpg",
"width": 160,
"height": 114,
"mimeType": "image/jpeg"
},
"1536x1536": {
"file": "https://cdn.kqed.org/wp-content/uploads/sites/10/2025/10/AIPolice-1536x1095.jpg",
"width": 1536,
"height": 1095,
"mimeType": "image/jpeg"
},
"post-thumbnail": {
"file": "https://cdn.kqed.org/wp-content/uploads/sites/10/2025/10/AIPolice-672x372.jpg",
"width": 672,
"height": 372,
"mimeType": "image/jpeg"
},
"twentyfourteen-full-width": {
"file": "https://cdn.kqed.org/wp-content/uploads/sites/10/2025/10/AIPolice-1038x576.jpg",
"width": 1038,
"height": 576,
"mimeType": "image/jpeg"
},
"npr-cds-wide": {
"file": "https://cdn.kqed.org/wp-content/uploads/sites/10/2025/10/AIPolice-1600x900.jpg",
"width": 1600,
"height": 900,
"mimeType": "image/jpeg"
},
"kqedFullSize": {
"file": "https://cdn.kqed.org/wp-content/uploads/sites/10/2025/10/AIPolice.jpg",
"width": 2000,
"height": 1426
}
},
"fetchFailed": false,
"isLoading": false
},
"news_12060375": {
"type": "attachments",
"id": "news_12060375",
"meta": {
"index": "attachments_1716263798",
"site": "news",
"id": "12060375",
"found": true
},
"title": "US-TECH-AI-ALTMAN",
"publishDate": 1760733500,
"status": "inherit",
"parent": 12060365,
"modified": 1760733569,
"caption": "OpenAI CEO Sam Altman speaks at OpenAI DevDay, the company's annual conference for developers, in San Francisco, California, on Oct. 6, 2025. ",
"credit": "Benjamin Legendre/AFP via Getty Images",
"altTag": null,
"description": null,
"imgSizes": {
"thumbnail": {
"file": "https://cdn.kqed.org/wp-content/uploads/sites/10/2025/10/SamAltmanGetty-160x108.jpg",
"width": 160,
"height": 108,
"mimeType": "image/jpeg"
},
"1536x1536": {
"file": "https://cdn.kqed.org/wp-content/uploads/sites/10/2025/10/SamAltmanGetty-1536x1034.jpg",
"width": 1536,
"height": 1034,
"mimeType": "image/jpeg"
},
"post-thumbnail": {
"file": "https://cdn.kqed.org/wp-content/uploads/sites/10/2025/10/SamAltmanGetty-672x372.jpg",
"width": 672,
"height": 372,
"mimeType": "image/jpeg"
},
"twentyfourteen-full-width": {
"file": "https://cdn.kqed.org/wp-content/uploads/sites/10/2025/10/SamAltmanGetty-1038x576.jpg",
"width": 1038,
"height": 576,
"mimeType": "image/jpeg"
},
"kqedFullSize": {
"file": "https://cdn.kqed.org/wp-content/uploads/sites/10/2025/10/SamAltmanGetty.jpg",
"width": 2000,
"height": 1347
}
},
"fetchFailed": false,
"isLoading": false
},
"news_12059912": {
"type": "attachments",
"id": "news_12059912",
"meta": {
"index": "attachments_1716263798",
"site": "news",
"id": "12059912",
"found": true
},
"title": "AI_Con_thumbnail",
"publishDate": 1760479501,
"status": "inherit",
"parent": 12059911,
"modified": 1760515545,
"caption": "Composite image of robotic hands hovering over a laptop showing an error screen. ",
"credit": "Composite by Gabriela Glueck; photos by Rawf8, MASTER, and Vertigo3d",
"altTag": "White, purple, and black robot hands hover over a laptop. The laptop is warped and appears distorted. The laptop screen shows that the laptop has crashed. These components are layered over a purple and green background featuring keys labeled “CHAT AI BOT.”",
"description": null,
"imgSizes": {
"medium": {
"file": "https://cdn.kqed.org/wp-content/uploads/sites/10/2025/10/AI_Con_thumbnail-2000x1125.jpg",
"width": 2000,
"height": 1125,
"mimeType": "image/jpeg"
},
"large": {
"file": "https://cdn.kqed.org/wp-content/uploads/sites/10/2025/10/AI_Con_thumbnail-2000x1125.jpg",
"width": 2000,
"height": 1125,
"mimeType": "image/jpeg"
},
"thumbnail": {
"file": "https://cdn.kqed.org/wp-content/uploads/sites/10/2025/10/AI_Con_thumbnail-160x90.jpg",
"width": 160,
"height": 90,
"mimeType": "image/jpeg"
},
"1536x1536": {
"file": "https://cdn.kqed.org/wp-content/uploads/sites/10/2025/10/AI_Con_thumbnail-1536x864.jpg",
"width": 1536,
"height": 864,
"mimeType": "image/jpeg"
},
"2048x2048": {
"file": "https://cdn.kqed.org/wp-content/uploads/sites/10/2025/10/AI_Con_thumbnail-2048x1152.jpg",
"width": 2048,
"height": 1152,
"mimeType": "image/jpeg"
},
"post-thumbnail": {
"file": "https://cdn.kqed.org/wp-content/uploads/sites/10/2025/10/AI_Con_thumbnail-672x372.jpg",
"width": 672,
"height": 372,
"mimeType": "image/jpeg"
},
"twentyfourteen-full-width": {
"file": "https://cdn.kqed.org/wp-content/uploads/sites/10/2025/10/AI_Con_thumbnail-1038x576.jpg",
"width": 1038,
"height": 576,
"mimeType": "image/jpeg"
},
"full-width": {
"file": "https://cdn.kqed.org/wp-content/uploads/sites/10/2025/10/AI_Con_thumbnail-2000x1125.jpg",
"width": 2000,
"height": 1125,
"mimeType": "image/jpeg"
},
"kqedFullSize": {
"file": "https://cdn.kqed.org/wp-content/uploads/sites/10/2025/10/AI_Con_thumbnail-scaled.jpg",
"width": 2560,
"height": 1440
}
},
"fetchFailed": false,
"isLoading": false
},
"news_12051437": {
"type": "attachments",
"id": "news_12051437",
"meta": {
"index": "attachments_1716263798",
"site": "news",
"id": "12051437",
"found": true
},
"title": "GavinNewsomAISF1",
"publishDate": 1754670249,
"status": "inherit",
"parent": 12051433,
"modified": 1754670737,
"caption": "On the rooftop of Google’s San Francisco offices on Aug. 7, 2025, Gov. Gavin Newsom announced a major statewide partnership with Google, Microsoft, IBM and Adobe to expand generative AI education — including training programs, certifications and internships — across California’s high schools, community colleges and Cal State universities.",
"credit": "Courtesy of the Office of the Governor",
"altTag": null,
"description": null,
"imgSizes": {
"thumbnail": {
"file": "https://cdn.kqed.org/wp-content/uploads/sites/10/2025/08/GavinNewsomAISF1-160x107.jpg",
"width": 160,
"height": 107,
"mimeType": "image/jpeg"
},
"1536x1536": {
"file": "https://cdn.kqed.org/wp-content/uploads/sites/10/2025/08/GavinNewsomAISF1-1536x1024.jpg",
"width": 1536,
"height": 1024,
"mimeType": "image/jpeg"
},
"post-thumbnail": {
"file": "https://cdn.kqed.org/wp-content/uploads/sites/10/2025/08/GavinNewsomAISF1-672x372.jpg",
"width": 672,
"height": 372,
"mimeType": "image/jpeg"
},
"twentyfourteen-full-width": {
"file": "https://cdn.kqed.org/wp-content/uploads/sites/10/2025/08/GavinNewsomAISF1-1038x576.jpg",
"width": 1038,
"height": 576,
"mimeType": "image/jpeg"
},
"kqedFullSize": {
"file": "https://cdn.kqed.org/wp-content/uploads/sites/10/2025/08/GavinNewsomAISF1.jpg",
"width": 2000,
"height": 1333
}
},
"fetchFailed": false,
"isLoading": false
},
"news_12051438": {
"type": "attachments",
"id": "news_12051438",
"meta": {
"index": "attachments_1716263798",
"site": "news",
"id": "12051438",
"found": true
},
"title": "GavinNewsomAISF2",
"publishDate": 1754670252,
"status": "inherit",
"parent": 12051433,
"modified": 1754670418,
"caption": "On the rooftop of Google’s San Francisco Embarcadero offices on Aug. 7, 2025, Gov. Gavin Newsom announced a major statewide partnership with Google, Microsoft, IBM and Adobe to expand generative AI education — including training programs, certifications, and internships — across California’s high schools, community colleges and Cal State universities.",
"credit": "Courtesy of the Office of the Governor",
"altTag": null,
"description": null,
"imgSizes": {
"thumbnail": {
"file": "https://cdn.kqed.org/wp-content/uploads/sites/10/2025/08/GavinNewsomAISF2-160x107.jpg",
"width": 160,
"height": 107,
"mimeType": "image/jpeg"
},
"1536x1536": {
"file": "https://cdn.kqed.org/wp-content/uploads/sites/10/2025/08/GavinNewsomAISF2-1536x1024.jpg",
"width": 1536,
"height": 1024,
"mimeType": "image/jpeg"
},
"post-thumbnail": {
"file": "https://cdn.kqed.org/wp-content/uploads/sites/10/2025/08/GavinNewsomAISF2-672x372.jpg",
"width": 672,
"height": 372,
"mimeType": "image/jpeg"
},
"twentyfourteen-full-width": {
"file": "https://cdn.kqed.org/wp-content/uploads/sites/10/2025/08/GavinNewsomAISF2-1038x576.jpg",
"width": 1038,
"height": 576,
"mimeType": "image/jpeg"
},
"kqedFullSize": {
"file": "https://cdn.kqed.org/wp-content/uploads/sites/10/2025/08/GavinNewsomAISF2.jpg",
"width": 2000,
"height": 1333
}
},
"fetchFailed": false,
"isLoading": false
},
"news_12055158": {
"type": "attachments",
"id": "news_12055158",
"meta": {
"index": "attachments_1716263798",
"site": "news",
"id": "12055158",
"found": true
},
"title": "US-TECHNOLOGY-COMPUTERS-INTERNET-AI-ANTHROPIC",
"publishDate": 1757358511,
"status": "inherit",
"parent": 12055125,
"modified": 1757358578,
"caption": "From left to right: Anthropic CEO Dario Amodei, Chief Product Officer Mike Krieger and Head of Communications Sasha de Marigny give a press conference during Anthropic's first developer conference in San Francisco, California, on May 22, 2025. ",
"credit": "Julie Jammot/AFP via Getty Images",
"altTag": null,
"description": null,
"imgSizes": {
"thumbnail": {
"file": "https://cdn.kqed.org/wp-content/uploads/sites/10/2025/09/AnthropicAIGetty-160x107.jpg",
"width": 160,
"height": 107,
"mimeType": "image/jpeg"
},
"1536x1536": {
"file": "https://cdn.kqed.org/wp-content/uploads/sites/10/2025/09/AnthropicAIGetty-1536x1024.jpg",
"width": 1536,
"height": 1024,
"mimeType": "image/jpeg"
},
"post-thumbnail": {
"file": "https://cdn.kqed.org/wp-content/uploads/sites/10/2025/09/AnthropicAIGetty-672x372.jpg",
"width": 672,
"height": 372,
"mimeType": "image/jpeg"
},
"twentyfourteen-full-width": {
"file": "https://cdn.kqed.org/wp-content/uploads/sites/10/2025/09/AnthropicAIGetty-1038x576.jpg",
"width": 1038,
"height": 576,
"mimeType": "image/jpeg"
},
"kqedFullSize": {
"file": "https://cdn.kqed.org/wp-content/uploads/sites/10/2025/09/AnthropicAIGetty.jpg",
"width": 2000,
"height": 1333
}
},
"fetchFailed": false,
"isLoading": false
},
"news_12054444": {
"type": "attachments",
"id": "news_12054444",
"meta": {
"index": "attachments_1716263798",
"site": "news",
"id": "12054444",
"found": true
},
"title": "President Trump Delivers Remarks, Announces Infrastructure Plan At White House",
"publishDate": 1756916263,
"status": "inherit",
"parent": 12054417,
"modified": 1756916263,
"caption": "WASHINGTON, DC - JANUARY 21: OpenAI CEO Sam Altman, accompanied by U.S. President Donald Trump, Oracle co-founder, CTO and Executive Chairman Larry Ellison (R), and SoftBank CEO Masayoshi Son (2nd-R), speaks during a news conference in the Roosevelt Room of the White House on January 21, 2025 in Washington, DC. Trump announced an investment in artificial intelligence (AI) infrastructure and took questions on a range of topics including his presidential pardons of Jan. 6 defendants, the war in Ukraine, cryptocurrencies and other topics. (Photo by Andrew Harnik/Getty Images)",
"credit": null,
"altTag": null,
"description": null,
"imgSizes": {
"thumbnail": {
"file": "https://cdn.kqed.org/wp-content/uploads/sites/10/2025/09/GettyImages-2194584857-160x107.jpg",
"width": 160,
"height": 107,
"mimeType": "image/jpeg"
},
"post-thumbnail": {
"file": "https://cdn.kqed.org/wp-content/uploads/sites/10/2025/09/GettyImages-2194584857-672x372.jpg",
"width": 672,
"height": 372,
"mimeType": "image/jpeg"
},
"twentyfourteen-full-width": {
"file": "https://cdn.kqed.org/wp-content/uploads/sites/10/2025/09/GettyImages-2194584857-1024x576.jpg",
"width": 1024,
"height": 576,
"mimeType": "image/jpeg"
},
"kqedFullSize": {
"file": "https://cdn.kqed.org/wp-content/uploads/sites/10/2025/09/GettyImages-2194584857.jpg",
"width": 1024,
"height": 683
}
},
"fetchFailed": false,
"isLoading": false
}
},
"audioPlayerReducer": {
"postId": "stream_live",
"isPaused": true,
"isPlaying": false,
"pfsActive": false,
"pledgeModalIsOpen": true,
"playerDrawerIsOpen": false
},
"authorsReducer": {
"byline_news_12061462": {
"type": "authors",
"id": "byline_news_12061462",
"meta": {
"override": true
},
"slug": "byline_news_12061462",
"name": "Kerry Klein, KVPR",
"isLoading": false
},
"rachael-myrow": {
"type": "authors",
"id": "251",
"meta": {
"index": "authors_1716337520",
"id": "251",
"found": true
},
"name": "Rachael Myrow",
"firstName": "Rachael",
"lastName": "Myrow",
"slug": "rachael-myrow",
"email": "rmyrow@kqed.org",
"display_author_email": false,
"staff_mastheads": [
"news"
],
"title": "Senior Editor of KQED's Silicon Valley News Desk",
"bio": "Rachael Myrow is Senior Editor of KQED's Silicon Valley News Desk, reporting on topics like \u003ca href=\"https://www.kqed.org/news/12023367/what-big-tech-sees-in-donald-trump\">what Big Tech sees in President Trump\u003c/a>, \u003ca href=\"https://www.kqed.org/news/12020857/california-lawmaker-ready-revive-fight-regulating-ai\">California's many, many AI bills\u003c/a>, and the \u003ca href=\"https://www.kqed.org/news/12017713/lost-sounds-of-san-francisco\">lost sounds of San Francisco\u003c/a>. You can hear her work on \u003ca href=\"https://www.npr.org/search?query=Rachael%20Myrow&page=1\">NPR\u003c/a>, \u003ca href=\"https://theworld.org/people/rachael-myrow\">The World\u003c/a>, WBUR's \u003ca href=\"https://www.wbur.org/search?q=Rachael%20Myrow\">\u003ci>Here & Now\u003c/i>\u003c/a> and the BBC. \u003c/i>She also guest hosts for KQED's \u003ci>\u003ca href=\"https://www.kqed.org/forum/tag/rachael-myrow\">Forum\u003c/a>\u003c/i>. Over the years, she's talked with Kamau Bell, David Byrne, Kamala Harris, Tony Kushner, Armistead Maupin, Van Dyke Parks, Arnold Schwarzenegger and Tommie Smith, among others.\r\n\r\nBefore all this, she hosted \u003cem>The California Report\u003c/em> for 7+ years.\r\n\r\nAwards? Sure: Peabody, Edward R. Murrow, Regional Edward R. Murrow, RTNDA, Northern California RTNDA, SPJ Northern California Chapter, LA Press Club, Golden Mic. Prior to joining KQED, Rachael worked in Los Angeles at KPCC and Marketplace. She holds degrees in English and journalism from UC Berkeley (where she got her start in public radio on KALX-FM).\r\n\r\nOutside of the studio, you'll find Rachael hiking Bay Area trails and whipping up Instagram-ready meals in her kitchen. More recently, she's taken up native-forward gardening.",
"avatar": "https://secure.gravatar.com/avatar/87bf8cb5874e045cdff430523a6d48b1?s=600&d=blank&r=g",
"twitter": "rachaelmyrow",
"facebook": null,
"instagram": null,
"linkedin": "https://www.linkedin.com/in/rachaelmyrow/",
"sites": [
{
"site": "arts",
"roles": [
"administrator"
]
},
{
"site": "news",
"roles": [
"edit_others_posts",
"editor"
]
},
{
"site": "futureofyou",
"roles": [
"editor"
]
},
{
"site": "bayareabites",
"roles": [
"editor"
]
},
{
"site": "stateofhealth",
"roles": [
"editor"
]
},
{
"site": "science",
"roles": [
"editor"
]
},
{
"site": "food",
"roles": [
"editor"
]
},
{
"site": "forum",
"roles": [
"editor"
]
},
{
"site": "liveblog",
"roles": [
"author"
]
}
],
"headData": {
"title": "Rachael Myrow | KQED",
"description": "Senior Editor of KQED's Silicon Valley News Desk",
"ogImgSrc": "https://secure.gravatar.com/avatar/87bf8cb5874e045cdff430523a6d48b1?s=600&d=blank&r=g",
"twImgSrc": "https://secure.gravatar.com/avatar/87bf8cb5874e045cdff430523a6d48b1?s=600&d=blank&r=g"
},
"isLoading": false,
"link": "/author/rachael-myrow"
},
"scottshafer": {
"type": "authors",
"id": "255",
"meta": {
"index": "authors_1716337520",
"id": "255",
"found": true
},
"name": "Scott Shafer",
"firstName": "Scott",
"lastName": "Shafer",
"slug": "scottshafer",
"email": "sshafer@kqed.org",
"display_author_email": false,
"staff_mastheads": [
"news"
],
"title": "KQED Contributor",
"bio": "Scott Shafer is a senior editor with the KQED Politics and Government desk. He is co-host of Political Breakdown, the award-winning radio show and podcast with a personal take on the world of politics. Scott came to KQED in 1998 to host the statewide\u003cem> California Report\u003c/em>. Prior to that he had extended stints in politics and government\u003cem>.\u003c/em> He uses that inside experience at KQED in his, reporting, hosting and analysis for the politics desk. Scott collaborated \u003cem>Political Breakdown a\u003c/em>nd on \u003cem>The Political Mind of Jerry Brown, \u003c/em>an eight-part series about the life and extraordinary political career of the former governor. For fun, he plays water polo with the San Francisco Tsunami.",
"avatar": "https://secure.gravatar.com/avatar/a62ebae45b79d7aed1a39a0e3bf68104?s=600&d=blank&r=g",
"twitter": "scottshafer",
"facebook": null,
"instagram": null,
"linkedin": null,
"sites": [
{
"site": "news",
"roles": [
"editor"
]
},
{
"site": "stateofhealth",
"roles": [
"author"
]
},
{
"site": "science",
"roles": [
"author"
]
},
{
"site": "forum",
"roles": [
"subscriber"
]
}
],
"headData": {
"title": "Scott Shafer | KQED",
"description": "KQED Contributor",
"ogImgSrc": "https://secure.gravatar.com/avatar/a62ebae45b79d7aed1a39a0e3bf68104?s=600&d=blank&r=g",
"twImgSrc": "https://secure.gravatar.com/avatar/a62ebae45b79d7aed1a39a0e3bf68104?s=600&d=blank&r=g"
},
"isLoading": false,
"link": "/author/scottshafer"
},
"mlagos": {
"type": "authors",
"id": "3239",
"meta": {
"index": "authors_1716337520",
"id": "3239",
"found": true
},
"name": "Marisa Lagos",
"firstName": "Marisa",
"lastName": "Lagos",
"slug": "mlagos",
"email": "mlagos@kqed.org",
"display_author_email": false,
"staff_mastheads": [
"news"
],
"title": "KQED Contributor",
"bio": "\u003cspan style=\"font-weight: 400;\">Marisa Lagos is a correspondent for KQED’s California Politics and Government Desk and co-hosts a weekly show and podcast, \u003c/span>\u003ci>\u003cspan style=\"font-weight: 400;\">Political Breakdown.\u003c/span>\u003c/i> \u003cspan style=\"font-weight: 400;\">At KQED, Lagos conducts reporting, analysis and investigations into state, local and national politics for radio, TV and online. Every week, she and cohost Scott Shafer sit down with political insiders on \u003c/span>\u003ci>\u003cspan style=\"font-weight: 400;\">Political Breakdown\u003c/span>\u003c/i>\u003cspan style=\"font-weight: 400;\">, where they offer a peek into lives and personalities of those driving politics in California and beyond. \u003c/span>\r\n\r\n\u003cspan style=\"font-weight: 400;\">Previously, she worked for nine years at the San Francisco Chronicle covering San Francisco City Hall and state politics; and at the San Francisco Examiner and Los Angeles Time,. She has won awards for her work investigating the 2017 wildfires and her ongoing coverage of criminal justice issues in California. She lives in San Francisco with her two sons and husband.\u003c/span>",
"avatar": "https://secure.gravatar.com/avatar/a261a0d3696fc066871ef96b85b5e7d2?s=600&d=blank&r=g",
"twitter": "@mlagos",
"facebook": null,
"instagram": null,
"linkedin": null,
"sites": [
{
"site": "news",
"roles": [
"editor"
]
},
{
"site": "science",
"roles": [
"editor"
]
},
{
"site": "forum",
"roles": [
"author"
]
}
],
"headData": {
"title": "Marisa Lagos | KQED",
"description": "KQED Contributor",
"ogImgSrc": "https://secure.gravatar.com/avatar/a261a0d3696fc066871ef96b85b5e7d2?s=600&d=blank&r=g",
"twImgSrc": "https://secure.gravatar.com/avatar/a261a0d3696fc066871ef96b85b5e7d2?s=600&d=blank&r=g"
},
"isLoading": false,
"link": "/author/mlagos"
},
"ecruzguevarra": {
"type": "authors",
"id": "8654",
"meta": {
"index": "authors_1716337520",
"id": "8654",
"found": true
},
"name": "Ericka Cruz Guevarra",
"firstName": "Ericka",
"lastName": "Cruz Guevarra",
"slug": "ecruzguevarra",
"email": "ecruzguevarra@kqed.org",
"display_author_email": true,
"staff_mastheads": [
"news"
],
"title": "Producer, The Bay Podcast",
"bio": "Ericka Cruz Guevarra is host of \u003ca href=\"https://www.kqed.org/podcasts/thebay\">\u003cem>The Bay\u003c/em>\u003c/a> podcast at KQED. Before host, she was the show’s producer. Her work in that capacity includes a three-part reported series on policing in Vallejo, which won a 2020 excellence in journalism award from the Society of Professional Journalists. Ericka has worked as a breaking news reporter at Oregon Public Broadcasting, helped produce the Code Switch podcast, and was KQED’s inaugural Raul Ramirez Diversity Fund intern. She’s also an alumna of NPR’s Next Generation Radio program. Send her an email if you have strong feelings about whether Fairfield and Suisun City are the Bay. Ericka is represented by SAG-AFTRA.",
"avatar": "https://secure.gravatar.com/avatar/25e5ab8d3d53fad2dcc7bb2b5c506b1a?s=600&d=blank&r=g",
"twitter": "NotoriousECG",
"facebook": null,
"instagram": null,
"linkedin": null,
"sites": [
{
"site": "arts",
"roles": [
"subscriber"
]
},
{
"site": "news",
"roles": [
"editor",
"manage_categories"
]
},
{
"site": "futureofyou",
"roles": [
"subscriber"
]
},
{
"site": "stateofhealth",
"roles": [
"subscriber"
]
},
{
"site": "science",
"roles": [
"editor"
]
},
{
"site": "forum",
"roles": [
"subscriber"
]
}
],
"headData": {
"title": "Ericka Cruz Guevarra | KQED",
"description": "Producer, The Bay Podcast",
"ogImgSrc": "https://secure.gravatar.com/avatar/25e5ab8d3d53fad2dcc7bb2b5c506b1a?s=600&d=blank&r=g",
"twImgSrc": "https://secure.gravatar.com/avatar/25e5ab8d3d53fad2dcc7bb2b5c506b1a?s=600&d=blank&r=g"
},
"isLoading": false,
"link": "/author/ecruzguevarra"
},
"amontecillo": {
"type": "authors",
"id": "11649",
"meta": {
"index": "authors_1716337520",
"id": "11649",
"found": true
},
"name": "Alan Montecillo",
"firstName": "Alan",
"lastName": "Montecillo",
"slug": "amontecillo",
"email": "amontecillo@kqed.org",
"display_author_email": false,
"staff_mastheads": [
"news"
],
"title": "KQED Contributor",
"bio": "Alan Montecillo is the senior editor of \u003cem>\u003ca href=\"http://kqed.org/thebay\">The Bay\u003c/a>, \u003c/em> KQED's local news podcast. Before moving to the Bay Area, he worked as a senior talk show producer for WILL in Champaign-Urbana, Illinois and at Oregon Public Broadcasting in Portland, Oregon. He has won journalism awards from the Society of Professional Journalists Northern California, the Public Media Journalists Association, The Signal Awards, and has also received a regional Edward R. Murrow award. Alan is a Filipino American from Hong Kong and a graduate of Reed College.",
"avatar": "https://secure.gravatar.com/avatar/d5e4e7a76481969ccba76f4e2b5ccabc?s=600&d=blank&r=g",
"twitter": "alanmontecillo",
"facebook": null,
"instagram": null,
"linkedin": null,
"sites": [
{
"site": "",
"roles": [
"editor"
]
},
{
"site": "news",
"roles": [
"editor",
"manage_categories"
]
}
],
"headData": {
"title": "Alan Montecillo | KQED",
"description": "KQED Contributor",
"ogImgSrc": "https://secure.gravatar.com/avatar/d5e4e7a76481969ccba76f4e2b5ccabc?s=600&d=blank&r=g",
"twImgSrc": "https://secure.gravatar.com/avatar/d5e4e7a76481969ccba76f4e2b5ccabc?s=600&d=blank&r=g"
},
"isLoading": false,
"link": "/author/amontecillo"
},
"kmizuguchi": {
"type": "authors",
"id": "11739",
"meta": {
"index": "authors_1716337520",
"id": "11739",
"found": true
},
"name": "Keith Mizuguchi",
"firstName": "Keith",
"lastName": "Mizuguchi",
"slug": "kmizuguchi",
"email": "kmizuguchi@kqed.org",
"display_author_email": false,
"staff_mastheads": [],
"title": "KQED Contributor",
"bio": null,
"avatar": "https://secure.gravatar.com/avatar/ce1182f9924192ae5ea66d39a75cd7d1?s=600&d=blank&r=g",
"twitter": null,
"facebook": null,
"instagram": null,
"linkedin": null,
"sites": [
{
"site": "news",
"roles": [
"editor"
]
},
{
"site": "science",
"roles": [
"editor"
]
}
],
"headData": {
"title": "Keith Mizuguchi | KQED",
"description": "KQED Contributor",
"ogImgSrc": "https://secure.gravatar.com/avatar/ce1182f9924192ae5ea66d39a75cd7d1?s=600&d=blank&r=g",
"twImgSrc": "https://secure.gravatar.com/avatar/ce1182f9924192ae5ea66d39a75cd7d1?s=600&d=blank&r=g"
},
"isLoading": false,
"link": "/author/kmizuguchi"
},
"jessicakariisa": {
"type": "authors",
"id": "11831",
"meta": {
"index": "authors_1716337520",
"id": "11831",
"found": true
},
"name": "Jessica Kariisa",
"firstName": "Jessica",
"lastName": "Kariisa",
"slug": "jessicakariisa",
"email": "jkariisa@kqed.org",
"display_author_email": false,
"staff_mastheads": [
"news"
],
"title": "Producer, The Bay",
"bio": "Jessica Kariisa is the producer of The Bay. She first joined KQED as an intern for The California Report Magazine, after which she became an on-call producer. She reported a Bay Curious episode on the use of rap lyrics in criminal trials which won a Society of Professional Journalists award in 2023 for Excellence in Features Journalism and the 2023 Signal Award for Best Conversation Starter. She’s worked on podcasts for Snap Judgment and American Public Media. Before embarking on her audio career, she was a music journalist.\r\n\r\nJessica Kariisa is represented by SAG-AFTRA.",
"avatar": "https://secure.gravatar.com/avatar/4afd355fd24f5515aeab77fd6c72b671?s=600&d=blank&r=g",
"twitter": null,
"facebook": null,
"instagram": null,
"linkedin": null,
"sites": [
{
"site": "arts",
"roles": [
"author"
]
},
{
"site": "news",
"roles": [
"editor",
"manage_categories"
]
}
],
"headData": {
"title": "Jessica Kariisa | KQED",
"description": "Producer, The Bay",
"ogImgSrc": "https://secure.gravatar.com/avatar/4afd355fd24f5515aeab77fd6c72b671?s=600&d=blank&r=g",
"twImgSrc": "https://secure.gravatar.com/avatar/4afd355fd24f5515aeab77fd6c72b671?s=600&d=blank&r=g"
},
"isLoading": false,
"link": "/author/jessicakariisa"
},
"chambrick": {
"type": "authors",
"id": "11832",
"meta": {
"index": "authors_1716337520",
"id": "11832",
"found": true
},
"name": "Chris Hambrick",
"firstName": "Chris",
"lastName": "Hambrick",
"slug": "chambrick",
"email": "chambrick@kqed.org",
"display_author_email": false,
"staff_mastheads": [],
"title": "KQED Contributor",
"bio": null,
"avatar": "https://secure.gravatar.com/avatar/c4a3663ebbd3a21fa35ef06a1236ce8a?s=600&d=blank&r=g",
"twitter": null,
"facebook": null,
"instagram": null,
"linkedin": null,
"sites": [
{
"site": "",
"roles": [
"editor"
]
},
{
"site": "arts",
"roles": [
"editor"
]
},
{
"site": "news",
"roles": [
"editor"
]
},
{
"site": "podcasts",
"roles": [
"editor"
]
}
],
"headData": {
"title": "Chris Hambrick | KQED",
"description": "KQED Contributor",
"ogImgSrc": "https://secure.gravatar.com/avatar/c4a3663ebbd3a21fa35ef06a1236ce8a?s=600&d=blank&r=g",
"twImgSrc": "https://secure.gravatar.com/avatar/c4a3663ebbd3a21fa35ef06a1236ce8a?s=600&d=blank&r=g"
},
"isLoading": false,
"link": "/author/chambrick"
},
"cegusa": {
"type": "authors",
"id": "11869",
"meta": {
"index": "authors_1716337520",
"id": "11869",
"found": true
},
"name": "Chris Egusa",
"firstName": "Chris",
"lastName": "Egusa",
"slug": "cegusa",
"email": "cegusa@kqed.org",
"display_author_email": false,
"staff_mastheads": [],
"title": "KQED Contributor",
"bio": null,
"avatar": "https://secure.gravatar.com/avatar/86d00b34cb7eeb5247e991f0e20c70c4?s=600&d=blank&r=g",
"twitter": null,
"facebook": null,
"instagram": null,
"linkedin": null,
"sites": [
{
"site": "arts",
"roles": [
"editor"
]
}
],
"headData": {
"title": "Chris Egusa | KQED",
"description": "KQED Contributor",
"ogImgSrc": "https://secure.gravatar.com/avatar/86d00b34cb7eeb5247e991f0e20c70c4?s=600&d=blank&r=g",
"twImgSrc": "https://secure.gravatar.com/avatar/86d00b34cb7eeb5247e991f0e20c70c4?s=600&d=blank&r=g"
},
"isLoading": false,
"link": "/author/cegusa"
},
"mcueva": {
"type": "authors",
"id": "11943",
"meta": {
"index": "authors_1716337520",
"id": "11943",
"found": true
},
"name": "Maya Cueva",
"firstName": "Maya",
"lastName": "Cueva",
"slug": "mcueva",
"email": "mcueva@kqed.org",
"display_author_email": false,
"staff_mastheads": [],
"title": "KQED Contributor",
"bio": null,
"avatar": "https://secure.gravatar.com/avatar/26d0967153608e4720f52779f754087a?s=600&d=blank&r=g",
"twitter": null,
"facebook": null,
"instagram": null,
"linkedin": null,
"sites": [
{
"site": "news",
"roles": [
"editor"
]
}
],
"headData": {
"title": "Maya Cueva | KQED",
"description": "KQED Contributor",
"ogImgSrc": "https://secure.gravatar.com/avatar/26d0967153608e4720f52779f754087a?s=600&d=blank&r=g",
"twImgSrc": "https://secure.gravatar.com/avatar/26d0967153608e4720f52779f754087a?s=600&d=blank&r=g"
},
"isLoading": false,
"link": "/author/mcueva"
},
"msung": {
"type": "authors",
"id": "11944",
"meta": {
"index": "authors_1716337520",
"id": "11944",
"found": true
},
"name": "Morgan Sung",
"firstName": "Morgan",
"lastName": "Sung",
"slug": "msung",
"email": "msung@kqed.org",
"display_author_email": false,
"staff_mastheads": [],
"title": "Close All Tabs Host",
"bio": null,
"avatar": "https://secure.gravatar.com/avatar/34033b8d232ee6c987ca6f0a1a28f0e5?s=600&d=blank&r=g",
"twitter": null,
"facebook": null,
"instagram": null,
"linkedin": null,
"sites": [
{
"site": "news",
"roles": [
"editor"
]
}
],
"headData": {
"title": "Morgan Sung | KQED",
"description": "Close All Tabs Host",
"ogImgSrc": "https://secure.gravatar.com/avatar/34033b8d232ee6c987ca6f0a1a28f0e5?s=600&d=blank&r=g",
"twImgSrc": "https://secure.gravatar.com/avatar/34033b8d232ee6c987ca6f0a1a28f0e5?s=600&d=blank&r=g"
},
"isLoading": false,
"link": "/author/msung"
}
},
"breakingNewsReducer": {},
"pagesReducer": {},
"postsReducer": {
"stream_live": {
"type": "live",
"id": "stream_live",
"audioUrl": "https://streams.kqed.org/kqedradio",
"title": "Live Stream",
"excerpt": "Live Stream information currently unavailable.",
"link": "/radio",
"featImg": "",
"label": {
"name": "KQED Live",
"link": "/"
}
},
"stream_kqedNewscast": {
"type": "posts",
"id": "stream_kqedNewscast",
"audioUrl": "https://www.kqed.org/.stream/anon/radio/RDnews/newscast.mp3?_=1",
"title": "KQED Newscast",
"featImg": "",
"label": {
"name": "88.5 FM",
"link": "/"
}
},
"news_12065748": {
"type": "posts",
"id": "news_12065748",
"meta": {
"index": "posts_1716263798",
"site": "news",
"id": "12065748",
"score": null,
"sort": [
1764722123000
]
},
"guestAuthors": [],
"slug": "how-trumps-ai-czar-is-benefiting-from-policies-he-sets",
"title": "How Trump’s AI Czar Is Benefiting From Policies He Sets",
"publishDate": 1764722123,
"format": "audio",
"headTitle": "How Trump’s AI Czar Is Benefiting From Policies He Sets | KQED",
"labelTerm": {},
"content": "\u003cp>Since President Trump appointed him as the White House artificial intelligence and cryptocurrency czar, Silicon Valley venture capitalist David Sacks has been in a position to drive policy in both emerging technologies. And according to recent reporting by the New York Times, Sacks has helped formulate policies that benefit him and his tech friends. Scott and Marisa are joined by New York Times reporter Ryan Mac, who was part of the team revealing Sacks’ conflicts of interest.\u003c/p>\n\u003cp>Check out \u003ca class=\"c-link c-link--underline\" href=\"https://www.kqed.org/newsletters/political-breakdown\" target=\"_blank\" rel=\"noopener noreferrer\" data-stringify-link=\"https://www.kqed.org/newsletters/political-breakdown\" data-sk=\"tooltip_parent\">Political Breakdown’s weekly newsletter\u003c/a>, delivered straight to your inbox.\u003c/p>\n\u003cp>[ad fullwidth]\u003c/p>\u003cp>\u003c/p>\n",
"blocks": [],
"excerpt": "A New York Times investigation revealed San Francisco tech billionaire David Sacks has influenced federal A.I. and crypto policy to benefit himself and his Silicon Valley tech friends. ",
"status": "publish",
"parent": 0,
"modified": 1764725700,
"stats": {
"hasAudio": false,
"hasVideo": false,
"hasChartOrMap": false,
"iframeSrcs": [],
"hasGoogleForm": false,
"hasGallery": false,
"hasHearkenModule": false,
"hasPolis": false,
"paragraphCount": 4,
"wordCount": 94
},
"headData": {
"title": "How Trump’s AI Czar Is Benefiting From Policies He Sets | KQED",
"description": "A New York Times investigation revealed San Francisco tech billionaire David Sacks has influenced federal A.I. and crypto policy to benefit himself and his Silicon Valley tech friends. ",
"ogTitle": "",
"ogDescription": "",
"ogImgId": "",
"twTitle": "",
"twDescription": "",
"twImgId": "",
"schema": {
"@context": "https://schema.org",
"@type": "NewsArticle",
"headline": "How Trump’s AI Czar Is Benefiting From Policies He Sets",
"datePublished": "2025-12-02T16:35:23-08:00",
"dateModified": "2025-12-02T17:35:00-08:00",
"image": "https://cdn.kqed.org/wp-content/uploads/2020/02/KQED-OG-Image@1x.png",
"isAccessibleForFree": "True",
"publisher": {
"@type": "NewsMediaOrganization",
"@id": "https://www.kqed.org/#organization",
"name": "KQED",
"logo": "https://cdn.kqed.org/wp-content/uploads/2020/02/KQED-OG-Image@1x.png",
"url": "https://www.kqed.org",
"sameAs": [
"https://www.facebook.com/KQED",
"https://twitter.com/KQED",
"https://www.instagram.com/kqed/",
"https://www.tiktok.com/@kqedofficial",
"https://www.linkedin.com/company/kqed",
"https://www.youtube.com/channel/UCeC0IOo7i1P_61zVUWbJ4nw"
]
}
}
},
"primaryCategory": {
"termId": 13,
"slug": "politics",
"name": "Politics"
},
"source": "Political Breakdown",
"audioUrl": "https://www.podtrac.com/pts/redirect.mp3/chrt.fm/track/G6C7C3/traffic.megaphone.fm/KQINC9929705027.mp3",
"sticky": false,
"nprStoryId": "kqed-12065748",
"templateType": "standard",
"featuredImageType": "standard",
"excludeFromSiteSearch": "Include",
"articleAge": "0",
"path": "/news/12065748/how-trumps-ai-czar-is-benefiting-from-policies-he-sets",
"audioTrackLength": null,
"parsedContent": [
{
"type": "contentString",
"content": "\u003cdiv class=\"post-body\">\u003cp>\u003cp>Since President Trump appointed him as the White House artificial intelligence and cryptocurrency czar, Silicon Valley venture capitalist David Sacks has been in a position to drive policy in both emerging technologies. And according to recent reporting by the New York Times, Sacks has helped formulate policies that benefit him and his tech friends. Scott and Marisa are joined by New York Times reporter Ryan Mac, who was part of the team revealing Sacks’ conflicts of interest.\u003c/p>\n\u003cp>Check out \u003ca class=\"c-link c-link--underline\" href=\"https://www.kqed.org/newsletters/political-breakdown\" target=\"_blank\" rel=\"noopener noreferrer\" data-stringify-link=\"https://www.kqed.org/newsletters/political-breakdown\" data-sk=\"tooltip_parent\">Political Breakdown’s weekly newsletter\u003c/a>, delivered straight to your inbox.\u003c/p>\n\u003cp>\u003c/p>\u003c/div>",
"attributes": {
"named": {},
"numeric": []
}
},
{
"type": "component",
"content": "",
"name": "ad",
"attributes": {
"named": {
"label": "fullwidth"
},
"numeric": [
"fullwidth"
]
}
},
{
"type": "contentString",
"content": "\u003cdiv class=\"post-body\">\u003cp>\u003c/p>\u003cp>\u003c/p>\n\u003c/div>\u003c/p>",
"attributes": {
"named": {},
"numeric": []
}
}
],
"link": "/news/12065748/how-trumps-ai-czar-is-benefiting-from-policies-he-sets",
"authors": [
"255",
"3239"
],
"programs": [
"news_33544"
],
"categories": [
"news_8",
"news_13"
],
"tags": [
"news_25184",
"news_34755",
"news_22757",
"news_36169",
"news_34377",
"news_22235",
"news_17968"
],
"featImg": "news_11999342",
"label": "source_news_12065748"
},
"news_12063587": {
"type": "posts",
"id": "news_12063587",
"meta": {
"index": "posts_1716263798",
"site": "news",
"id": "12063587",
"score": null,
"sort": [
1762796967000
]
},
"guestAuthors": [],
"slug": "ai-boom-leads-to-increased-concerns-of-environmental-impacts-of-data-centers",
"title": "AI Boom Leads To Increased Concerns Of Environmental Impacts Of Data Centers",
"publishDate": 1762796967,
"format": "audio",
"headTitle": "AI Boom Leads To Increased Concerns Of Environmental Impacts Of Data Centers | KQED",
"labelTerm": {},
"content": "\u003cp>\u003cb>Here are the morning’s top stories on Monday, November 10, 2025…\u003c/b>\u003c/p>\n\u003cul>\n\u003cli style=\"font-weight: 400\">\u003cspan style=\"font-weight: 400\">California legislators considered dozens of bills related to artificial intelligence this year. Those numbers have spiked as lawmakers grapple with the technology’s increasing presence and possible negative consequences. One point of concern: \u003c/span>\u003ca href=\"https://www.capradio.org/articles/2025/11/06/with-the-rise-of-ai-californias-data-centers-require-more-water-energy-but-by-how-much/\">\u003cspan style=\"font-weight: 400\">the impact that generative AI will have on the state’s natural resource\u003c/span>\u003c/a>\u003cspan style=\"font-weight: 400\">s as it becomes an everyday tool for Californians. \u003c/span>\u003c/li>\n\u003cli>Environmental activists are celebrating \u003ca href=\"https://www.kqed.org/news/12063468/environmentalists-celebrate-retirement-of-platform-esther-a-socal-oil-rig\">the retirement of a Southern California oil rig\u003c/a>, with a celebration at San Francisco’s waterfront. The California State Lands Commission officially finalized the decommission last week.\u003c/li>\n\u003c/ul>\n\u003ch2 class=\"page-title\">\u003ca href=\"https://www.capradio.org/articles/2025/11/06/with-the-rise-of-ai-californias-data-centers-require-more-water-energy-but-by-how-much/\">\u003cstrong>With The Rise Of AI, California’s Data Centers Require More Water, Energy. 
But By How Much?\u003c/strong>\u003c/a>\u003c/h2>\n\u003cp>Earlier this year, Governor Gavin Newsom \u003ca href=\"https://www.gov.ca.gov/2025/09/29/governor-newsom-signs-sb-53-advancing-californias-world-leading-artificial-intelligence-industry/\">signed\u003c/a> into law\u003ca href=\"https://leginfo.legislature.ca.gov/faces/billNavClient.xhtml?bill_id=202520260SB53\"> Senate Bill 53\u003c/a>, which would require large model developers like Anthropic and Open AI to be transparent about safety measures they put in place to prevent catastrophic events. The legislation would also create CalCompute, a public cloud infrastructure that expands access to AI resources for researchers, startups and public institutions.\u003c/p>\n\u003cp>This was one of several bills lawmakers introduced in Sacramento to regulate the AI industry. Assemblymember Rebecca Bauer-Kahan authored a bill requiring data centers to report their energy usage. It failed to pass. But she said the work for the bill began with that question, and a desire for more transparency. “As I started to ask questions about what kind of data was going into our understanding around the energy needs of the growing data center industry, it seemed like the answer was there wasn’t great data,” Bauer-Kahan said.\u003c/p>\n\u003cp>Assemblymember Diane Papan authored a similar bill looking to track water use at data centers. It required centers to provide estimates of their expected water use when applying for a business license, and an annual water use report thereafter. These centers generally require large amounts of water to cool down servers and other equipment. “I feel that the more information we have, it’ll help us integrate the growth of data centers into that broader task of climate resilient resource management, particularly as it relates to water,” Papan said. The bill passed through the legislature but was vetoed by Governor Newsom. 
In a statement, Newsom said he was “reluctant to impose rigid reporting requirements about operational details on this sector without understanding the full impact on businesses and the consumers of this technology.” The decision surprised Papan, who originally described the bill as one that was sure to pass given legislators’ desire to optimize California’s water planning. She said she disagrees with his thinking.\u003c/p>\n\u003cp>Data centers are places that contain the servers needed to provide essential services online, like web searches and video streaming. Large-scale data centers have been around for decades, first coming into existence about thirty years ago. But with the more recent rise of generative AI — the kind used for chatbots like ChatGPT — the water and energy these centers demand has gone up. Shaolei Ren, an associate professor of electrical and computer engineering at UC Riverside, said discussions about the environmental impacts associated with increasing AI use aren’t completely new. Artificial intelligence has been integrated in online platforms for years now — like with YouTube, for example, where AI is used to give tailored video recommendations. But Ren said it wasn’t until after 2020, when the general public became more aware of the integration of generative AI online, that these conversations really took hold. And with the technology’s growth comes a greater demand on resources – in California and the rest of the country. Citing a Lawrence Berkeley National Laboratory report, Ren said data centers accounted for 3% of the nation’s energy use in 2020. That’s projected to increase by up to 12% by 2028. Ren said the way in which data centers consume these resources also plays a part in their impact on state resources. A data center may consume the same amount of water in a year as an office building, for example. 
But if most of that consumption happens during one hot summer month instead of evenly throughout the year, he said that could create a different kind of stress on water resources.\u003c/p>\n\u003ch2 class=\"routes-Site-routes-Post-Title-__Title__title\">\u003ca href=\"https://www.kqed.org/news/12063468/environmentalists-celebrate-retirement-of-platform-esther-a-socal-oil-rig\">\u003cstrong>Environmentalists Celebrate ‘Retirement’ Of Platform Esther, A SoCal Oil Rig\u003c/strong>\u003c/a>\u003c/h2>\n\u003cp>Environmental activists partied outside the \u003ca href=\"https://www.kqed.org/news/tag/san-francisco\">San Francisco\u003c/a> Ferry Building on Friday to celebrate the decommission of a Southern California oil rig. The Center for Biological Diversity called the event a “retirement party” for Platform Esther, a soon-to-be decommissioned oil rig off the coast of Orange County.\u003c/p>\n\u003cp>[ad fullwidth]\u003c/p>\n\u003cp>Activists donned party hats and performed their own rendition of Kool & the Gang’s \u003cem>Celebration\u003c/em>, renamed \u003cem>Decommission. \u003c/em>They danced with a giant inflatable whale, and tore into a blue-iced cake decorated with a paper cutout of an oil rig.\u003c/p>\n\u003cp>Inside the Ferry Building, the California State Lands Commission officially finalized the decommission at a hearing. “This is actually a historic win. This platform is being retired about fifteen years ahead of the official end of its useful life,” said Ilonka Zlatar, an organizer with Oil and Gas Action Network. “We want to thank the State Lands Commission and the agencies that are standing up and helping us to transition into the clean energy economy that we need.”\u003c/p>\n\u003cp>Platform Esther was first built in 1965 and is located 1.5 miles off the coast of Seal Beach. It was rebuilt in the ’80s after sustaining major damage from a winter storm in 1983. 
Production officially ceased in August 2025.\u003c/p>\n\u003cp>\u003c/p>\n\u003cp>New leases for oil drilling off the coast haven’t been approved since 1984, and past Republican presidents have worked with Democrats in protecting California’s waters from drilling. But conservation efforts have faced new threats under the current and past Trump administrations, which \u003ca href=\"https://www.sfchronicle.com/california/article/trump-offshore-drilling-21116334.php\">recently revealed a proposal\u003c/a> to dramatically ramp up oil drilling off California’s coast to increase the country’s energy independence.\u003c/p>\n\n",
"blocks": [],
"excerpt": "State lawmakers introduced a handful of AI-related bills, but two related to data centers were not approved.",
"status": "publish",
"parent": 0,
"modified": 1762796967,
"stats": {
"hasAudio": false,
"hasVideo": false,
"hasChartOrMap": false,
"iframeSrcs": [],
"hasGoogleForm": false,
"hasGallery": false,
"hasHearkenModule": false,
"hasPolis": false,
"paragraphCount": 12,
"wordCount": 1038
},
"headData": {
"title": "AI Boom Leads To Increased Concerns Of Environmental Impacts Of Data Centers | KQED",
"description": "State lawmakers introduced a handful of AI-related bills, but two related to data centers were not approved.",
"ogTitle": "",
"ogDescription": "",
"ogImgId": "",
"twTitle": "",
"twDescription": "",
"twImgId": "",
"schema": {
"@context": "https://schema.org",
"@type": "NewsArticle",
"headline": "AI Boom Leads To Increased Concerns Of Environmental Impacts Of Data Centers",
"datePublished": "2025-11-10T09:49:27-08:00",
"dateModified": "2025-11-10T09:49:27-08:00",
"image": "https://cdn.kqed.org/wp-content/uploads/2020/02/KQED-OG-Image@1x.png",
"isAccessibleForFree": "True",
"publisher": {
"@type": "NewsMediaOrganization",
"@id": "https://www.kqed.org/#organization",
"name": "KQED",
"logo": "https://cdn.kqed.org/wp-content/uploads/2020/02/KQED-OG-Image@1x.png",
"url": "https://www.kqed.org",
"sameAs": [
"https://www.facebook.com/KQED",
"https://twitter.com/KQED",
"https://www.instagram.com/kqed/",
"https://www.tiktok.com/@kqedofficial",
"https://www.linkedin.com/company/kqed",
"https://www.youtube.com/channel/UCeC0IOo7i1P_61zVUWbJ4nw"
]
}
}
},
"primaryCategory": {
"termId": 33520,
"slug": "podcast",
"name": "Podcast"
},
"source": "The California Report",
"sourceUrl": "https://www.kqed.org/news/tag/tcrarchive/",
"audioUrl": "https://www.podtrac.com/pts/redirect.mp3/chrt.fm/track/G6C7C3/traffic.megaphone.fm/KQINC5370685452.mp3?updated=1762787431",
"sticky": false,
"nprStoryId": "kqed-12063587",
"templateType": "standard",
"featuredImageType": "standard",
"excludeFromSiteSearch": "Include",
"articleAge": "0",
"path": "/news/12063587/ai-boom-leads-to-increased-concerns-of-environmental-impacts-of-data-centers",
"audioTrackLength": null,
"parsedContent": [
{
"type": "contentString",
"content": "\u003cdiv class=\"post-body\">\u003cp>\u003cp>\u003cb>Here are the morning’s top stories on Monday, November 10, 2025…\u003c/b>\u003c/p>\n\u003cul>\n\u003cli style=\"font-weight: 400\">\u003cspan style=\"font-weight: 400\">California legislators considered dozens of bills related to artificial intelligence this year. Those numbers have spiked as lawmakers grapple with the technology’s increasing presence and possible negative consequences. One point of concern: \u003c/span>\u003ca href=\"https://www.capradio.org/articles/2025/11/06/with-the-rise-of-ai-californias-data-centers-require-more-water-energy-but-by-how-much/\">\u003cspan style=\"font-weight: 400\">the impact that generative AI will have on the state’s natural resource\u003c/span>\u003c/a>\u003cspan style=\"font-weight: 400\">s as it becomes an everyday tool for Californians. \u003c/span>\u003c/li>\n\u003cli>Environmental activists are celebrating \u003ca href=\"https://www.kqed.org/news/12063468/environmentalists-celebrate-retirement-of-platform-esther-a-socal-oil-rig\">the retirement of a Southern California oil rig\u003c/a>, with a celebration at San Francisco’s waterfront. The California State Lands Commission officially finalized the decommission last week.\u003c/li>\n\u003c/ul>\n\u003ch2 class=\"page-title\">\u003ca href=\"https://www.capradio.org/articles/2025/11/06/with-the-rise-of-ai-californias-data-centers-require-more-water-energy-but-by-how-much/\">\u003cstrong>With The Rise Of AI, California’s Data Centers Require More Water, Energy. 
But By How Much?\u003c/strong>\u003c/a>\u003c/h2>\n\u003cp>Earlier this year, Governor Gavin Newsom \u003ca href=\"https://www.gov.ca.gov/2025/09/29/governor-newsom-signs-sb-53-advancing-californias-world-leading-artificial-intelligence-industry/\">signed\u003c/a> into law\u003ca href=\"https://leginfo.legislature.ca.gov/faces/billNavClient.xhtml?bill_id=202520260SB53\"> Senate Bill 53\u003c/a>, which would require large model developers like Anthropic and Open AI to be transparent about safety measures they put in place to prevent catastrophic events. The legislation would also create CalCompute, a public cloud infrastructure that expands access to AI resources for researchers, startups and public institutions.\u003c/p>\n\u003cp>This was one of several bills lawmakers introduced in Sacramento to regulate the AI industry. Assemblymember Rebecca Bauer-Kahan authored a bill requiring data centers to report their energy usage. It failed to pass. But she said the work for the bill began with that question, and a desire for more transparency. “As I started to ask questions about what kind of data was going into our understanding around the energy needs of the growing data center industry, it seemed like the answer was there wasn’t great data,” Bauer-Kahan said.\u003c/p>\n\u003cp>Assemblymember Diane Papan authored a similar bill looking to track water use at data centers. It required centers to provide estimates of their expected water use when applying for a business license, and an annual water use report thereafter. These centers generally require large amounts of water to cool down servers and other equipment. “I feel that the more information we have, it’ll help us integrate the growth of data centers into that broader task of climate resilient resource management, particularly as it relates to water,” Papan said. The bill passed through the legislature but was vetoed by Governor Newsom. 
In a statement, Newsom said he was “reluctant to impose rigid reporting requirements about operational details on this sector without understanding the full impact on businesses and the consumers of this technology.” The decision surprised Papan, who originally described the bill as one that was sure to pass given legislators’ desire to optimize California’s water planning. She said she disagrees with his thinking.\u003c/p>\n\u003cp>Data centers are places that contain the servers needed to provide essential services online, like web searches and video streaming. Large-scale data centers have been around for decades, first coming into existence about thirty years ago. But with the more recent rise of generative AI — the kind used for chatbots like ChatGPT — the water and energy these centers demand has gone up. Shaolei Ren, an associate professor of electrical and computer engineering at UC Riverside, said discussions about the environmental impacts associated with increasing AI use aren’t completely new. Artificial intelligence has been integrated in online platforms for years now — like with YouTube, for example, where AI is used to give tailored video recommendations. But Ren said it wasn’t until after 2020, when the general public became more aware of the integration of generative AI online, that these conversations really took hold. And with the technology’s growth comes a greater demand on resources – in California and the rest of the country. Citing a Lawrence Berkeley National Laboratory report, Ren said data centers accounted for 3% of the nation’s energy use in 2020. That’s projected to increase by up to 12% by 2028. Ren said the way in which data centers consume these resources also plays a part in their impact on state resources. A data center may consume the same amount of water in a year as an office building, for example. 
But if most of that consumption happens during one hot summer month instead of evenly throughout the year, he said that could create a different kind of stress on water resources.\u003c/p>\n\u003ch2 class=\"routes-Site-routes-Post-Title-__Title__title\">\u003ca href=\"https://www.kqed.org/news/12063468/environmentalists-celebrate-retirement-of-platform-esther-a-socal-oil-rig\">\u003cstrong>Environmentalists Celebrate ‘Retirement’ Of Platform Esther, A SoCal Oil Rig\u003c/strong>\u003c/a>\u003c/h2>\n\u003cp>Environmental activists partied outside the \u003ca href=\"https://www.kqed.org/news/tag/san-francisco\">San Francisco\u003c/a> Ferry Building on Friday to celebrate the decommission of a Southern California oil rig. The Center for Biological Diversity called the event a “retirement party” for Platform Esther, a soon-to-be decommissioned oil rig off the coast of Orange County.\u003c/p>\n\u003cp>\u003c/p>\u003c/div>",
"attributes": {
"named": {},
"numeric": []
}
},
{
"type": "component",
"content": "",
"name": "ad",
"attributes": {
"named": {
"label": "fullwidth"
},
"numeric": [
"fullwidth"
]
}
},
{
"type": "contentString",
"content": "\u003cdiv class=\"post-body\">\u003cp>\u003c/p>\n\u003cp>Activists donned party hats and performed their own rendition of Kool & the Gang’s \u003cem>Celebration\u003c/em>, renamed \u003cem>Decommission. \u003c/em>They danced with a giant inflatable whale, and tore into a blue-iced cake decorated with a paper cutout of an oil rig.\u003c/p>\n\u003cp>Inside the Ferry Building, the California State Lands Commission officially finalized the decommission at a hearing. “This is actually a historic win. This platform is being retired about fifteen years ahead of the official end of its useful life,” said Ilonka Zlatar, an organizer with Oil and Gas Action Network. “We want to thank the State Lands Commission and the agencies that are standing up and helping us to transition into the clean energy economy that we need.”\u003c/p>\n\u003cp>Platform Esther was first built in 1965 and is located 1.5 miles off the coast of Seal Beach. It was rebuilt in the ’80s after sustaining major damage from a winter storm in 1983. Production officially ceased in August 2025.\u003c/p>\n\u003cp>\u003c/p>\n\u003cp>New leases for oil drilling off the coast haven’t been approved since 1984, and past Republican presidents have worked with Democrats in protecting California’s waters from drilling. But conservation efforts have faced new threats under the current and past Trump administrations, which \u003ca href=\"https://www.sfchronicle.com/california/article/trump-offshore-drilling-21116334.php\">recently revealed a proposal\u003c/a> to dramatically ramp up oil drilling off California’s coast to increase the country’s energy independence.\u003c/p>\n\n\u003c/div>\u003c/p>",
"attributes": {
"named": {},
"numeric": []
}
}
],
"link": "/news/12063587/ai-boom-leads-to-increased-concerns-of-environmental-impacts-of-data-centers",
"authors": [
"11739"
],
"programs": [
"news_72"
],
"categories": [
"news_33520",
"news_34018"
],
"tags": [
"news_25184",
"news_34755",
"news_36087",
"news_36091",
"news_36089",
"news_36088",
"news_36090",
"news_21998",
"news_21268"
],
"featImg": "news_12063588",
"label": "source_news_12063587"
},
"news_12061462": {
"type": "posts",
"id": "news_12061462",
"meta": {
"index": "posts_1716263798",
"site": "news",
"id": "12061462",
"score": null,
"sort": [
1761570036000
]
},
"guestAuthors": [],
"slug": "new-state-law-requires-additional-safeguards-when-police-use-generative-ai",
"title": "New State Law Requires Additional Safeguards When Police Use Generative AI",
"publishDate": 1761570036,
"format": "standard",
"headTitle": "New State Law Requires Additional Safeguards When Police Use Generative AI | KQED",
"labelTerm": {
"term": 29969,
"site": "news"
},
"content": "\u003cp>FRESNO, Calif. — Gov. Gavin Newsom this month signed a \u003ca href=\"https://www.kqed.org/news/12007520/how-artificial-intelligence-is-changing-the-reports-police-write\">first-of-its-kind California law\u003c/a> requiring police to disclose how they use generative artificial intelligence, a move aimed at boosting transparency and public trust.\u003c/p>\n\u003cp>California is among the first states to address the issue. KQED \u003ca href=\"https://www.kqed.org/news/12007520/how-artificial-intelligence-is-changing-the-reports-police-write\">first reported\u003c/a> last October on local departments adopting AI tools. The reporting was cited in the legislative analysis of the bill that ultimately became the law.\u003c/p>\n\u003cp>Senate Bill 524, signed into law Oct. 10, requires police officers to disclose when they use AI to write police reports. The tools include Draft One, an AI assistant that transcribes and summarizes body-camera footage to produce a draft report, which officers can then revise and edit.\u003c/p>\n\u003cp>[ad fullwidth]\u003c/p>\n\u003cp>Specifically, the law now requires a written disclosure to appear at the bottom of each page of a police report for which Draft One or other similar tools were used. The legislation also requires an “audit trail” that would preserve the original draft as well as identify the source bodycam footage or audio.\u003c/p>\n\u003cp>Police departments in \u003ca href=\"https://www.kqed.org/news/12007520/how-artificial-intelligence-is-changing-the-reports-police-write\">East Palo Alto\u003c/a> and \u003ca href=\"https://www.kvpr.org/government-politics/2024-11-15/ai-is-helping-fresno-police-officers-write-their-reports-could-it-outperform-them\">Fresno\u003c/a> were among the first in the state to adopt the technology.\u003c/p>\n\u003cp>Axon, the company that developed Draft One, told KVPR and KQED last year that its developers built safeguards into their software. 
For example, officers must fill in prompts within the generated report, then sign off on the report’s accuracy before it can be submitted. The tool also includes a disclaimer that Draft One was used, though police agencies have thus far been able to customize where in a report it’s placed.\u003c/p>\n\u003cfigure id=\"attachment_12006138\" class=\"wp-caption aligncenter\" style=\"max-width: 2000px\">\u003cimg loading=\"lazy\" decoding=\"async\" class=\"size-full wp-image-12006138\" src=\"https://cdn.kqed.org/wp-content/uploads/sites/10/2024/09/240923-AI-IN-POLICING-MD-06-KQED.jpg\" alt=\"The torso of a person dressed in a police uniform holds a hand over a body camera.\" width=\"2000\" height=\"1333\" srcset=\"https://cdn.kqed.org/wp-content/uploads/sites/10/2024/09/240923-AI-IN-POLICING-MD-06-KQED.jpg 2000w, https://cdn.kqed.org/wp-content/uploads/sites/10/2024/09/240923-AI-IN-POLICING-MD-06-KQED-800x533.jpg 800w, https://cdn.kqed.org/wp-content/uploads/sites/10/2024/09/240923-AI-IN-POLICING-MD-06-KQED-1020x680.jpg 1020w, https://cdn.kqed.org/wp-content/uploads/sites/10/2024/09/240923-AI-IN-POLICING-MD-06-KQED-160x107.jpg 160w, https://cdn.kqed.org/wp-content/uploads/sites/10/2024/09/240923-AI-IN-POLICING-MD-06-KQED-1536x1024.jpg 1536w, https://cdn.kqed.org/wp-content/uploads/sites/10/2024/09/240923-AI-IN-POLICING-MD-06-KQED-1920x1280.jpg 1920w\" sizes=\"auto, (max-width: 2000px) 100vw, 2000px\">\u003cfigcaption class=\"wp-caption-text\">An East Palo Alto Police officer’s Axon body camera in East Palo Alto on Sept. 23, 2024. \u003ccite>(Martin do Nascimento/KQED)\u003c/cite>\u003c/figcaption>\u003c/figure>\n\u003cp>Police departments have said the technology saves officers significant time, and even that some AI-generated reports are better than the ones written entirely by officers. 
Nevertheless, the bill arose out of concerns that bias or errors generated by AI software could make their way into final incident reports, which play a key role in charging, detaining and sentencing suspects.\u003c/p>\n\u003cp>Kate Chatfield, executive director of the California Public Defenders Association, which sponsored the bill, said she’s grateful the measure became law.\u003c/p>\n\u003cp>“Due process requires transparency,” Chatfield wrote in a public statement. “Everyone in the legal system — judges, juries, attorneys and the accused — deserve to know who wrote the police report.”[aside postID=news_12050772 hero='https://cdn.kqed.org/wp-content/uploads/sites/10/2024/09/240923-AI-IN-POLICING-MD-03-KQED.jpg?ver=1727233046']“With SB 524, California is sending a clear message: Innovation in policing must be tethered to accountability,” state Sen. Jesse Arreguín, who wrote the bill, said in the statement. “No more opaque reports, no more guessing whether AI shaped the narrative.”\u003c/p>\n\u003cp>Kevin Little, a defense attorney in Fresno, said the law is a step in the right direction, but not a true remedy.\u003c/p>\n\u003cp>“My own experience with AI in an unrelated context leads me to conclude that AI platforms have a significant amount of user bias and tend to support the agendas of the user,” he said.\u003c/p>\n\u003cp>Larry Bowlan, a spokesperson for the Fresno Police Department, said the agency had already implemented some of the safeguards now required by the law and does not expect the new rules to be especially burdensome.\u003c/p>\n\u003cp>“Our AI-powered narrative assistant … already generates a disclosure and requires our users to sign acknowledgements. Draft One also already produces the requisite audit trail,” he wrote in an email. 
“We are actively working with our vendor on the best solution for preserving and storing the first draft provided by the assistant, as well as a minor tweak to ensure the disclosure is present on each printed page, rather than just the first page as it is now.”\u003c/p>\n\u003cp>A spokesperson for the East Palo Alto Department said his agency has no official response to the law at this time.\u003c/p>\n\u003cfigure id=\"attachment_12007615\" class=\"wp-caption aligncenter\" style=\"max-width: 2000px\">\u003cimg loading=\"lazy\" decoding=\"async\" class=\"size-full wp-image-12007615\" src=\"https://cdn.kqed.org/wp-content/uploads/sites/10/2024/10/240923-AI-IN-POLICING-MD-15-KQED-5.jpg\" alt=\"A woman dressed in a police uniform sits at a desk in an office looking at a computer screen.\" width=\"2000\" height=\"1333\" srcset=\"https://cdn.kqed.org/wp-content/uploads/sites/10/2024/10/240923-AI-IN-POLICING-MD-15-KQED-5.jpg 2000w, https://cdn.kqed.org/wp-content/uploads/sites/10/2024/10/240923-AI-IN-POLICING-MD-15-KQED-5-800x533.jpg 800w, https://cdn.kqed.org/wp-content/uploads/sites/10/2024/10/240923-AI-IN-POLICING-MD-15-KQED-5-1020x680.jpg 1020w, https://cdn.kqed.org/wp-content/uploads/sites/10/2024/10/240923-AI-IN-POLICING-MD-15-KQED-5-160x107.jpg 160w, https://cdn.kqed.org/wp-content/uploads/sites/10/2024/10/240923-AI-IN-POLICING-MD-15-KQED-5-1536x1024.jpg 1536w, https://cdn.kqed.org/wp-content/uploads/sites/10/2024/10/240923-AI-IN-POLICING-MD-15-KQED-5-1920x1280.jpg 1920w\" sizes=\"auto, (max-width: 2000px) 100vw, 2000px\">\u003cfigcaption class=\"wp-caption-text\">East Palo Alto Police Officer Wendy Venegas reviews body camera footage and uses Axon’s Draft One AI-based system to draft reports based on the audio from the camera at police headquarters in East Palo Alto on Sept. 23, 2024. 
\u003ccite>(Martin do Nascimento/KQED)\u003c/cite>\u003c/figcaption>\u003c/figure>\n\u003cp>Opponents of the bill included the California Police Chiefs Association and the Police Officers Research Association of California, a police union advocacy and lobbying group. The Chiefs Association did not respond to a request for comment. In a statement, PORAC President Brian R. Marvel said the signed version of the law is an improvement over earlier drafts.[aside postID=news_12060365 hero='https://cdn.kqed.org/wp-content/uploads/sites/10/2025/10/SamAltmanGetty.jpg']“In its original form, SB 524 would have put significant administrative burden on already short-staffed police forces and created broad liability by requiring agencies to retain every AI-generated draft, interim, and final version of a report, each labeled with AI disclosure language,” he wrote. “PORAC advocated to amend this bill … We were pleased to see several of these amendments taken, with the final version of the bill significantly narrowed.”\u003c/p>\n\u003cp>Axon representative Victoria Keough said the company is committed to complying with all state and federal laws, including SB 524.\u003c/p>\n\u003cp>“When developing AI for public safety, transparency and accountability are essential,” Keough wrote in a statement. “Responsible innovation remains at the core of how Axon designs and delivers new technology.”\u003c/p>\n\u003cp>The new requirements go into effect on Jan. 1, 2026.\u003c/p>\n\u003cp>\u003ca href=\"https://www.kvpr.org/people/kerry-klein\">\u003cem>Kerry Klein\u003c/em>\u003c/a>\u003cem> is a reporter and editor with KVPR in Fresno. Additional reporting was provided by KQED’s \u003c/em>\u003ca href=\"https://www.kqed.org/author/slewis\">\u003cem>Sukey Lewis\u003c/em>\u003c/a>. 
\u003cem>The story was produced with support from \u003c/em>\u003ca href=\"https://www.kqed.org/californianewsroom\">\u003cem>The California Newsroom\u003c/em>\u003c/a>\u003cem>, a collaboration of public media outlets throughout the state. \u003c/em>\u003c/p>\n\u003cp>\u003c/p>\n",
"blocks": [],
"excerpt": "The new law requires written disclosures and supporting materials whenever departments use AI to generate police reports.",
"status": "publish",
"parent": 0,
"modified": 1761597406,
"stats": {
"hasAudio": false,
"hasVideo": false,
"hasChartOrMap": false,
"iframeSrcs": [],
"hasGoogleForm": false,
"hasGallery": false,
"hasHearkenModule": false,
"hasPolis": false,
"paragraphCount": 21,
"wordCount": 982
},
"headData": {
"title": "New State Law Requires Additional Safeguards When Police Use Generative AI | KQED",
"description": "The new law requires written disclosures and supporting materials whenever departments use AI to generate police reports.",
"ogTitle": "",
"ogDescription": "",
"ogImgId": "",
"twTitle": "",
"twDescription": "",
"twImgId": "",
"schema": {
"@context": "https://schema.org",
"@type": "NewsArticle",
"headline": "New State Law Requires Additional Safeguards When Police Use Generative AI",
"datePublished": "2025-10-27T06:00:36-07:00",
"dateModified": "2025-10-27T13:36:46-07:00",
"image": "https://cdn.kqed.org/wp-content/uploads/2020/02/KQED-OG-Image@1x.png",
"isAccessibleForFree": "True",
"publisher": {
"@type": "NewsMediaOrganization",
"@id": "https://www.kqed.org/#organization",
"name": "KQED",
"logo": "https://cdn.kqed.org/wp-content/uploads/2020/02/KQED-OG-Image@1x.png",
"url": "https://www.kqed.org",
"sameAs": [
"https://www.facebook.com/KQED",
"https://twitter.com/KQED",
"https://www.instagram.com/kqed/",
"https://www.tiktok.com/@kqedofficial",
"https://www.linkedin.com/company/kqed",
"https://www.youtube.com/channel/UCeC0IOo7i1P_61zVUWbJ4nw"
]
}
}
},
"primaryCategory": {
"termId": 248,
"slug": "technology",
"name": "Technology"
},
"sticky": false,
"nprByline": "Kerry Klein, KVPR",
"nprStoryId": "kqed-12061462",
"templateType": "standard",
"featuredImageType": "standard",
"excludeFromSiteSearch": "Include",
"showOnAuthorArchivePages": "No",
"articleAge": "0",
"path": "/news/12061462/new-state-law-requires-additional-safeguards-when-police-use-generative-ai",
"audioTrackLength": null,
"parsedContent": [
{
"type": "contentString",
"content": "\u003cdiv class=\"post-body\">\u003cp>\u003cp>FRESNO, Calif. — Gov. Gavin Newsom this month signed a \u003ca href=\"https://www.kqed.org/news/12007520/how-artificial-intelligence-is-changing-the-reports-police-write\">first-of-its-kind California law\u003c/a> requiring police to disclose how they use generative artificial intelligence, a move aimed at boosting transparency and public trust.\u003c/p>\n\u003cp>California is among the first states to address the issue. KQED \u003ca href=\"https://www.kqed.org/news/12007520/how-artificial-intelligence-is-changing-the-reports-police-write\">first reported\u003c/a> last October on local departments adopting AI tools. The reporting was cited in the legislative analysis of the bill that ultimately became the law.\u003c/p>\n\u003cp>Senate Bill 524, signed into law Oct. 10, requires police officers to disclose when they use AI to write police reports. The tools include Draft One, an AI assistant that transcribes and summarizes body-camera footage to produce a draft report, which officers can then revise and edit.\u003c/p>\n\u003cp>\u003c/p>\u003c/div>",
"attributes": {
"named": {},
"numeric": []
}
},
{
"type": "component",
"content": "",
"name": "ad",
"attributes": {
"named": {
"label": "fullwidth"
},
"numeric": [
"fullwidth"
]
}
},
{
"type": "contentString",
"content": "\u003cdiv class=\"post-body\">\u003cp>\u003c/p>\n\u003cp>Specifically, the law now requires a written disclosure to appear at the bottom of each page of a police report for which Draft One or other similar tools were used. The legislation also requires an “audit trail” that would preserve the original draft as well as identify the source bodycam footage or audio.\u003c/p>\n\u003cp>Police departments in \u003ca href=\"https://www.kqed.org/news/12007520/how-artificial-intelligence-is-changing-the-reports-police-write\">East Palo Alto\u003c/a> and \u003ca href=\"https://www.kvpr.org/government-politics/2024-11-15/ai-is-helping-fresno-police-officers-write-their-reports-could-it-outperform-them\">Fresno\u003c/a> were among the first in the state to adopt the technology.\u003c/p>\n\u003cp>Axon, the company that developed Draft One, told KVPR and KQED last year that its developers built safeguards into their software. For example, officers must fill in prompts within the generated report, then sign off on the report’s accuracy before it can be submitted. 
The tool also includes a disclaimer that Draft One was used, though police agencies have thus far been able to customize where in a report it’s placed.\u003c/p>\n\u003cfigure id=\"attachment_12006138\" class=\"wp-caption aligncenter\" style=\"max-width: 2000px\">\u003cimg loading=\"lazy\" decoding=\"async\" class=\"size-full wp-image-12006138\" src=\"https://cdn.kqed.org/wp-content/uploads/sites/10/2024/09/240923-AI-IN-POLICING-MD-06-KQED.jpg\" alt=\"The torso of a person dressed in a police uniform holds a hand over a body camera.\" width=\"2000\" height=\"1333\" srcset=\"https://cdn.kqed.org/wp-content/uploads/sites/10/2024/09/240923-AI-IN-POLICING-MD-06-KQED.jpg 2000w, https://cdn.kqed.org/wp-content/uploads/sites/10/2024/09/240923-AI-IN-POLICING-MD-06-KQED-800x533.jpg 800w, https://cdn.kqed.org/wp-content/uploads/sites/10/2024/09/240923-AI-IN-POLICING-MD-06-KQED-1020x680.jpg 1020w, https://cdn.kqed.org/wp-content/uploads/sites/10/2024/09/240923-AI-IN-POLICING-MD-06-KQED-160x107.jpg 160w, https://cdn.kqed.org/wp-content/uploads/sites/10/2024/09/240923-AI-IN-POLICING-MD-06-KQED-1536x1024.jpg 1536w, https://cdn.kqed.org/wp-content/uploads/sites/10/2024/09/240923-AI-IN-POLICING-MD-06-KQED-1920x1280.jpg 1920w\" sizes=\"auto, (max-width: 2000px) 100vw, 2000px\">\u003cfigcaption class=\"wp-caption-text\">An East Palo Alto Police officer’s Axon body camera in East Palo Alto on Sept. 23, 2024. \u003ccite>(Martin do Nascimento/KQED)\u003c/cite>\u003c/figcaption>\u003c/figure>\n\u003cp>Police departments have said the technology saves officers significant time, and even that some AI-generated reports are better than the ones written entirely by officers. 
Nevertheless, the bill arose out of concerns that bias or errors generated by AI software could make their way into final incident reports, which play a key role in charging, detaining and sentencing suspects.\u003c/p>\n\u003cp>Kate Chatfield, executive director of the California Public Defenders Association, which sponsored the bill, said she’s grateful the measure became law.\u003c/p>\n\u003cp>“Due process requires transparency,” Chatfield wrote in a public statement. “Everyone in the legal system — judges, juries, attorneys and the accused — deserve to know who wrote the police report.”\u003c/p>\u003c/div>",
"attributes": {
"named": {},
"numeric": []
}
},
{
"type": "component",
"content": "",
"name": "aside",
"attributes": {
"named": {
"postid": "news_12050772",
"hero": "https://cdn.kqed.org/wp-content/uploads/sites/10/2024/09/240923-AI-IN-POLICING-MD-03-KQED.jpg?ver=1727233046",
"label": ""
},
"numeric": []
}
},
{
"type": "contentString",
"content": "\u003cdiv class=\"post-body\">\u003cp>“With SB 524, California is sending a clear message: Innovation in policing must be tethered to accountability,” state Sen. Jesse Arreguín, who wrote the bill, said in the statement. “No more opaque reports, no more guessing whether AI shaped the narrative.”\u003c/p>\n\u003cp>Kevin Little, a defense attorney in Fresno, said the law is a step in the right direction, but not a true remedy.\u003c/p>\n\u003cp>“My own experience with AI in an unrelated context leads me to conclude that AI platforms have a significant amount of user bias and tend to support the agendas of the user,” he said.\u003c/p>\n\u003cp>Larry Bowlan, a spokesperson for the Fresno Police Department, said the agency had already implemented some of the safeguards now required by the law and does not expect the new rules to be especially burdensome.\u003c/p>\n\u003cp>“Our AI-powered narrative assistant … already generates a disclosure and requires our users to sign acknowledgements. Draft One also already produces the requisite audit trail,” he wrote in an email. 
“We are actively working with our vendor on the best solution for preserving and storing the first draft provided by the assistant, as well as a minor tweak to ensure the disclosure is present on each printed page, rather than just the first page as it is now.”\u003c/p>\n\u003cp>A spokesperson for the East Palo Alto Department said his agency has no official response to the law at this time.\u003c/p>\n\u003cfigure id=\"attachment_12007615\" class=\"wp-caption aligncenter\" style=\"max-width: 2000px\">\u003cimg loading=\"lazy\" decoding=\"async\" class=\"size-full wp-image-12007615\" src=\"https://cdn.kqed.org/wp-content/uploads/sites/10/2024/10/240923-AI-IN-POLICING-MD-15-KQED-5.jpg\" alt=\"A woman dressed in a police uniform sits at a desk in an office looking at a computer screen.\" width=\"2000\" height=\"1333\" srcset=\"https://cdn.kqed.org/wp-content/uploads/sites/10/2024/10/240923-AI-IN-POLICING-MD-15-KQED-5.jpg 2000w, https://cdn.kqed.org/wp-content/uploads/sites/10/2024/10/240923-AI-IN-POLICING-MD-15-KQED-5-800x533.jpg 800w, https://cdn.kqed.org/wp-content/uploads/sites/10/2024/10/240923-AI-IN-POLICING-MD-15-KQED-5-1020x680.jpg 1020w, https://cdn.kqed.org/wp-content/uploads/sites/10/2024/10/240923-AI-IN-POLICING-MD-15-KQED-5-160x107.jpg 160w, https://cdn.kqed.org/wp-content/uploads/sites/10/2024/10/240923-AI-IN-POLICING-MD-15-KQED-5-1536x1024.jpg 1536w, https://cdn.kqed.org/wp-content/uploads/sites/10/2024/10/240923-AI-IN-POLICING-MD-15-KQED-5-1920x1280.jpg 1920w\" sizes=\"auto, (max-width: 2000px) 100vw, 2000px\">\u003cfigcaption class=\"wp-caption-text\">East Palo Alto Police Officer Wendy Venegas reviews body camera footage and uses Axon’s Draft One AI-based system to draft reports based on the audio from the camera at police headquarters in East Palo Alto on Sept. 23, 2024. 
\u003ccite>(Martin do Nascimento/KQED)\u003c/cite>\u003c/figcaption>\u003c/figure>\n\u003cp>Opponents of the bill included the California Police Chiefs Association and the Police Officers Research Association of California, a police union advocacy and lobbying group. The Chiefs Association did not respond to a request for comment. In a statement, PORAC President Brian R. Marvel said the signed version of the law is an improvement over earlier drafts.\u003c/p>\u003c/div>",
"attributes": {
"named": {},
"numeric": []
}
},
{
"type": "component",
"content": "",
"name": "aside",
"attributes": {
"named": {
"postid": "news_12060365",
"hero": "https://cdn.kqed.org/wp-content/uploads/sites/10/2025/10/SamAltmanGetty.jpg",
"label": ""
},
"numeric": []
}
},
{
"type": "contentString",
"content": "\u003cdiv class=\"post-body\">\u003cp>“In its original form, SB 524 would have put significant administrative burden on already short-staffed police forces and created broad liability by requiring agencies to retain every AI-generated draft, interim, and final version of a report, each labeled with AI disclosure language,” he wrote. “PORAC advocated to amend this bill … We were pleased to see several of these amendments taken, with the final version of the bill significantly narrowed.”\u003c/p>\n\u003cp>Axon representative Victoria Keough said the company is committed to complying with all state and federal laws, including SB 524.\u003c/p>\n\u003cp>“When developing AI for public safety, transparency and accountability are essential,” Keough wrote in a statement. “Responsible innovation remains at the core of how Axon designs and delivers new technology.”\u003c/p>\n\u003cp>The new requirements go into effect on Jan. 1, 2026.\u003c/p>\n\u003cp>\u003ca href=\"https://www.kvpr.org/people/kerry-klein\">\u003cem>Kerry Klein\u003c/em>\u003c/a>\u003cem> is a reporter and editor with KVPR in Fresno. Additional reporting was provided by KQED’s \u003c/em>\u003ca href=\"https://www.kqed.org/author/slewis\">\u003cem>Sukey Lewis\u003c/em>\u003c/a>. \u003cem>The story was produced with support from \u003c/em>\u003ca href=\"https://www.kqed.org/californianewsroom\">\u003cem>The California Newsroom\u003c/em>\u003c/a>\u003cem>, a collaboration of public media outlets throughout the state. \u003c/em>\u003c/p>\n\u003cp>\u003c/p>\n\u003c/div>\u003c/p>",
"attributes": {
"named": {},
"numeric": []
}
}
],
"link": "/news/12061462/new-state-law-requires-additional-safeguards-when-police-use-generative-ai",
"authors": [
"byline_news_12061462"
],
"categories": [
"news_31795",
"news_34167",
"news_8",
"news_248"
],
"tags": [
"news_25184",
"news_32664",
"news_34755",
"news_1386",
"news_18538",
"news_27626",
"news_116",
"news_34586",
"news_35940",
"news_1631"
],
"affiliates": [
"news_29969"
],
"featImg": "news_12061541",
"label": "news_29969"
},
"news_12060365": {
"type": "posts",
"id": "news_12060365",
"meta": {
"index": "posts_1716263798",
"site": "news",
"id": "12060365",
"score": null,
"sort": [
1760734445000
]
},
"guestAuthors": [],
"slug": "chatgpt-will-soon-allow-adults-to-generate-erotica-is-this-the-future-we-want",
"title": "ChatGPT Will Soon Allow Adults to Generate Erotica. Is This the Future We Want?",
"publishDate": 1760734445,
"format": "standard",
"headTitle": "ChatGPT Will Soon Allow Adults to Generate Erotica. Is This the Future We Want? | KQED",
"labelTerm": {
"site": "news"
},
"content": "\u003cp>OpenAI isn’t the first developer to announce plans to \u003ca href=\"https://www.kqed.org/news/12038154/kids-talking-ai-companion-chatbots-stanford-researchers-say-thats-bad-idea\">offer erotic content on its chatbot\u003c/a>. But the blowback against the tech company’s decision to loosen restrictions this week has been bigger, given the San Francisco-based company’s promise to ensure its AI\u003ca href=\"https://openai.com/our-structure/\"> benefits all of humanity\u003c/a>.\u003c/p>\n\u003cp>The most significant change will roll out in December, when OpenAI will allow more comprehensive age-gating, allowing verified adults to generate erotic content using the tool — “as part of our ‘treat adult users like adults’ principle,” OpenAI CEO Sam \u003ca href=\"https://x.com/sama/status/1978129344598827128\">Altman posted Tuesday\u003c/a> on the social media platform X.\u003c/p>\n\u003cp>Consumer advocates say OpenAI is following the lead of xAI’s Grok, which offers loosely moderated “adult” modes with minimal age verification, raising concerns that teenage users may have access to explicit content. Meta AI is believed to be following xAI’s lead as well, and its back and forth over whether it is intentionally pushing mature content to minors has \u003ca href=\"https://www.reuters.com/world/us/us-senator-hawley-launches-probe-into-meta-ai-policies-2025-08-15/\">prompted\u003c/a> U.S. Sen. Josh Hawley, R-Missouri, to investigate.\u003c/p>\n\u003cp>[ad fullwidth]\u003c/p>\n\u003cp>“We made ChatGPT pretty restrictive to make sure we were being careful with mental health issues. 
We realize this made it less useful/enjoyable to many users who had no mental health problems, but given the seriousness of the issue, we wanted to get this right,” Altman wrote.\u003c/p>\n\u003cp>The announcement came less than two months after the company was sued by the parents of Adam Raine, a teenager who \u003ca href=\"https://www.kqed.org/news/12054490/child-safety-groups-demand-mental-health-guardrails-after-california-teens-suicide-using-chatgpt\">died by suicide\u003c/a> earlier this year, for ChatGPT allegedly providing him with specific advice on how to kill himself — setting off a firestorm of news coverage and comment.\u003c/p>\n\u003cfigure id=\"attachment_11989313\" class=\"wp-caption aligncenter\" style=\"max-width: 2000px\">\u003cimg loading=\"lazy\" decoding=\"async\" class=\"size-full wp-image-11989313\" src=\"https://cdn.kqed.org/wp-content/uploads/sites/10/2024/06/GettyImages-2155035557-scaled-e1760733694503.jpg\" alt=\"\" width=\"2000\" height=\"1334\">\u003cfigcaption class=\"wp-caption-text\">The OpenAI ChatGPT logo. \u003ccite>(Jaap Arriens/NurPhoto via Getty Images)\u003c/cite>\u003c/figcaption>\u003c/figure>\n\u003cp>Altman delivered \u003ca href=\"https://x.com/sama/status/1978539332215681076\">a follow-up\u003c/a> on Wednesday. “We will still not allow things that cause harm to others, and we will treat users who are having mental health crises very different from users who are not … But we are not the elected moral police of the world. 
In the same way that society differentiates other appropriate boundaries (R-rated movies, for example), we want to do a similar thing here,” Altman wrote, although it remains unclear whether OpenAI will extend erotica to its AI voice, image and video generation tools.\u003c/p>\n\u003cp>“Comparing content moderation of chatbot interactions with movie ratings is not really useful,” wrote Irina Raicu, director of the Internet Ethics program at the Markkula Center for Applied Ethics at Santa Clara University. “It downplays both the nature and the extent of the problems that we’re seeing when people get more and more dependent on and influenced by chatbot ‘relationships.’”\u003c/p>\n\u003cp>Mark Cuban, the entrepreneur, investor and media personality, argued much the same in a string of \u003ca href=\"https://x.com/mcuban/status/1978317936336028016\">posts on X\u003c/a>.\u003c/p>\n\u003cp>“I don’t see how OpenAI can age-gate successfully enough. I’m also not sure that it can’t psychologically damage young adults. We just don’t know yet how addictive LLMs can be. Which, in my OPINION, means that parents and schools, that would otherwise want to use ChatGPT because of its current ubiquity, will decide not to use it,” Cuban wrote.[aside postID=news_12059714 hero='https://cdn.kqed.org/wp-content/uploads/sites/10/2025/08/GavinNewsomAISF1.jpg']Others see the drive for paying subscribers and increased profit behind the move. As a private company, OpenAI does not release its shareholder reports publicly. 
However, \u003ca href=\"https://www.bloomberg.com/news/articles/2025-10-02/openai-completes-share-sale-at-record-500-billion-valuation?accessToken=eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJzb3VyY2UiOiJTdWJzY3JpYmVyR2lmdGVkQXJ0aWNsZSIsImlhdCI6MTc2MDcxODQwMSwiZXhwIjoxNzYxMzIzMjAxLCJhcnRpY2xlSWQiOiJUM0hLMkNHUFdDSEIwMCIsImJjb25uZWN0SWQiOiJBM0VCRjM5ODM4RDc0RDI4QUJDREM4MDZDMDA5RTVBMiJ9.ADGZysjoeNVhUDWXwiuAxieyKueee-676dgJIAM9BvQ\">Bloomberg\u003c/a> recently reported that OpenAI has completed a deal to help employees sell shares in the company at a $500 billion valuation. According to Altman, ChatGPT is already used by \u003ca href=\"https://techcrunch.com/2025/10/06/sam-altman-says-chatgpt-has-hit-800m-weekly-active-users/\">800 million weekly active users\u003c/a>. With so much investment at stake, OpenAI is under pressure to grow its subscriber base. The company has also raised billions of dollars for a historic infrastructure buildout, an investment OpenAI eventually needs to pay back.\u003c/p>\n\u003cp>“It is no secret that sexual content is one of the most popular and lucrative aspects of the internet,” wrote Jennifer King, a privacy and data policy fellow at the Stanford University Institute for Human-Centered Artificial Intelligence. She noted that nearly 20 U.S. 
states have passed laws \u003ca href=\"https://www.axios.com/2025/01/16/adult-website-age-verification-states\">requiring age verification for online adult content\u003c/a> sites.\u003c/p>\n\u003cp>“By openly embracing business models that allow access to adult content, mainstream providers like OpenAI will face the burden of demonstrating that they have robust methods for excluding children under 18 and potentially adults under the age of 21,” King said.\u003c/p>\n\u003cp>AI chatbots appear to be going the way of social media, said California Assemblymember Rebecca Bauer-Kahan, D-San Ramon, whose bill that would have required child safety guardrails for companion chatbots was \u003ca href=\"https://www.kqed.org/news/12059714/newsom-vetoes-most-watched-childrens-ai-bill-signs-16-others-targeting-tech\">vetoed earlier this week\u003c/a>.\u003c/p>\n\u003cfigure id=\"attachment_11802216\" class=\"wp-caption aligncenter\" style=\"max-width: 1920px\">\u003cimg loading=\"lazy\" decoding=\"async\" class=\"size-full wp-image-11802216\" src=\"https://cdn.kqed.org/wp-content/uploads/sites/10/2020/02/RS41373_IMG_0396-qut.jpg\" alt=\"Assemblymember Rebecca Bauer-Kahan says local jurisdictions need the power to stop a wildfire disaster before it starts. 
The assemblymember and other state lawmakers announced a bill to expand enforcement actions against PG&E and other utilities on February, 18, 2020.\" width=\"1920\" height=\"1440\" srcset=\"https://cdn.kqed.org/wp-content/uploads/sites/10/2020/02/RS41373_IMG_0396-qut.jpg 1920w, https://cdn.kqed.org/wp-content/uploads/sites/10/2020/02/RS41373_IMG_0396-qut-160x120.jpg 160w, https://cdn.kqed.org/wp-content/uploads/sites/10/2020/02/RS41373_IMG_0396-qut-800x600.jpg 800w, https://cdn.kqed.org/wp-content/uploads/sites/10/2020/02/RS41373_IMG_0396-qut-1020x765.jpg 1020w, https://cdn.kqed.org/wp-content/uploads/sites/10/2020/02/RS41373_IMG_0396-qut-1832x1374.jpg 1832w, https://cdn.kqed.org/wp-content/uploads/sites/10/2020/02/RS41373_IMG_0396-qut-1376x1032.jpg 1376w, https://cdn.kqed.org/wp-content/uploads/sites/10/2020/02/RS41373_IMG_0396-qut-1044x783.jpg 1044w, https://cdn.kqed.org/wp-content/uploads/sites/10/2020/02/RS41373_IMG_0396-qut-632x474.jpg 632w, https://cdn.kqed.org/wp-content/uploads/sites/10/2020/02/RS41373_IMG_0396-qut-536x402.jpg 536w\" sizes=\"auto, (max-width: 1920px) 100vw, 1920px\">\u003cfigcaption class=\"wp-caption-text\">Assemblymember Rebecca Bauer-Kahan on Feb. 18, 2020. \u003ccite>(Eli Walsh/Bay City News)\u003c/cite>\u003c/figcaption>\u003c/figure>\n\u003cp>“My fear is that we are on a path to creating the next, frankly, more addictive, more harmful version of social media for our children,” Bauer-Kahan told KQED. 
“I do not think that the addictive features in these chatbots that result in our children having relationships with a chatbot instead of their fellow humans is a positive thing, and the experts \u003ca href=\"https://cdt.org/insights/hand-in-hand-schools-embrace-of-ai-connected-to-increased-risks-to-students/\">confirm that\u003c/a>.”\u003c/p>\n\u003cp>OpenAI did not comment for this story, but the company has written that it’s \u003ca href=\"https://openai.com/index/teen-safety-freedom-and-privacy/\">working\u003c/a> on an under-18 version of ChatGPT, which will redirect minors to age-appropriate content. A couple of weeks ago, OpenAI announced it’s rolling out safety features for minors, including an age prediction system and a way for \u003ca href=\"https://openai.com/index/introducing-parental-controls/\">parents\u003c/a> to control their teens’ ChatGPT accounts. This week, OpenAI announced the formation of \u003ca href=\"https://openai.com/index/expert-council-on-well-being-and-ai/\">an expert council \u003c/a>of mental health professionals to advise the company on well-being and AI.\u003c/p>\n\u003cp>In mid-September, the Federal Trade Commission launched an \u003ca href=\"https://www.ftc.gov/news-events/news/press-releases/2025/09/ftc-launches-inquiry-ai-chatbots-acting-companions\">inquiry\u003c/a> into seven AI chatbot developers, including xAI, Meta and OpenAI, “seeking information on how these firms measure, test, and monitor potentially negative impacts of this technology on children and teens.”\u003c/p>\n\u003cp>For the most part, a couple of dozen \u003ca href=\"https://techcrunch.com/2025/09/06/the-growing-debate-over-expanding-age-verification-laws/\">states\u003c/a> and their \u003ca href=\"https://oag.ca.gov/system/files/attachments/press-docs/AI%20Chatbot_FINAL%20%2844%29.pdf\">attorneys general\u003c/a> have taken the lead on regulation, enacting measures like age verification and requiring many online platforms to verify users’ identities 
before granting access. East Bay Assemblymember Buffy Wicks won the \u003ca href=\"https://a14.asmdc.org/press-releases/20250909-google-meta-among-tech-leaders-and-child-advocates-voicing-support-wicks\">support of major tech\u003c/a> companies for her measure, \u003ca href=\"https://a14.asmdc.org/press-releases/20250602-asm-wicks-bill-protect-kids-online-passes-assembly-bipartisan-support\">AB 1043\u003c/a>, which was just signed into law by Gov. Gavin Newsom.\u003c/p>\n\u003cp>But any parent knows it’s easy for children to sidestep those controls, or reach out to older siblings or friends who can help them, Bauer-Kahan said. She said she sees a coincidence in the fact that the veto of her toughest bill was announced on Monday, and Altman’s announcement was posted on Tuesday.\u003c/p>\n\u003cp>“Here was a bill that was really requiring very clear, safe-by-design AI for children with real liability. And I think that was further than the industry wanted California to go. I just found the timing of the veto and then this announcement about access to erotica too coincidental not to call out,” she said.\u003c/p>\n\u003cp>[ad floatright]\u003c/p>\n",
"blocks": [],
"excerpt": "OpenAI’s announcement this week that erotic content will soon be available to adults reflects a growing trend. Some researchers and Bay Area politicians are worried about the effects. ",
"status": "publish",
"parent": 0,
"modified": 1760988336,
"stats": {
"hasAudio": false,
"hasVideo": false,
"hasChartOrMap": false,
"iframeSrcs": [],
"hasGoogleForm": false,
"hasGallery": false,
"hasHearkenModule": false,
"hasPolis": false,
"paragraphCount": 20,
"wordCount": 1189
},
"headData": {
"title": "ChatGPT Will Soon Allow Adults to Generate Erotica. Is This the Future We Want? | KQED",
"description": "OpenAI’s announcement this week that erotic content will soon be available to adults reflects a growing trend. Some researchers and Bay Area politicians are worried about the effects. ",
"ogTitle": "",
"ogDescription": "",
"ogImgId": "",
"twTitle": "",
"twDescription": "",
"twImgId": "",
"schema": {
"@context": "https://schema.org",
"@type": "NewsArticle",
"headline": "ChatGPT Will Soon Allow Adults to Generate Erotica. Is This the Future We Want?",
"datePublished": "2025-10-17T13:54:05-07:00",
"dateModified": "2025-10-20T12:25:36-07:00",
"image": "https://cdn.kqed.org/wp-content/uploads/2020/02/KQED-OG-Image@1x.png",
"isAccessibleForFree": "True",
"publisher": {
"@type": "NewsMediaOrganization",
"@id": "https://www.kqed.org/#organization",
"name": "KQED",
"logo": "https://cdn.kqed.org/wp-content/uploads/2020/02/KQED-OG-Image@1x.png",
"url": "https://www.kqed.org",
"sameAs": [
"https://www.facebook.com/KQED",
"https://twitter.com/KQED",
"https://www.instagram.com/kqed/",
"https://www.tiktok.com/@kqedofficial",
"https://www.linkedin.com/company/kqed",
"https://www.youtube.com/channel/UCeC0IOo7i1P_61zVUWbJ4nw"
]
}
}
},
"primaryCategory": {
"termId": 248,
"slug": "technology",
"name": "Technology"
},
"sticky": false,
"nprStoryId": "kqed-12060365",
"templateType": "standard",
"featuredImageType": "standard",
"excludeFromSiteSearch": "Include",
"articleAge": "0",
"path": "/news/12060365/chatgpt-will-soon-allow-adults-to-generate-erotica-is-this-the-future-we-want",
"audioTrackLength": null,
"parsedContent": [
{
"type": "contentString",
"content": "\u003cdiv class=\"post-body\">\u003cp>\u003cp>OpenAI isn’t the first developer to announce plans to \u003ca href=\"https://www.kqed.org/news/12038154/kids-talking-ai-companion-chatbots-stanford-researchers-say-thats-bad-idea\">offer erotic content on its chatbot\u003c/a>. But the blowback against the tech company’s decision to loosen restrictions this week has been bigger, given the San Francisco-based company’s promise to ensure its AI\u003ca href=\"https://openai.com/our-structure/\"> benefits all of humanity\u003c/a>.\u003c/p>\n\u003cp>The most significant change will roll out in December, when OpenAI will allow more comprehensive age-gating, allowing verified adults to generate erotic content using the tool — “as part of our ‘treat adult users like adults’ principle,” OpenAI CEO Sam \u003ca href=\"https://x.com/sama/status/1978129344598827128\">Altman posted Tuesday\u003c/a> on the social media platform X.\u003c/p>\n\u003cp>Consumer advocates say OpenAI is following the lead of xAI’s Grok, which offers loosely moderated “adult” modes with minimal age verification, raising concerns that teenage users may have access to explicit content. Meta AI is believed to be following xAI’s lead as well, and its back and forth over whether it is intentionally pushing mature content to minors has \u003ca href=\"https://www.reuters.com/world/us/us-senator-hawley-launches-probe-into-meta-ai-policies-2025-08-15/\">prompted\u003c/a> U.S. Sen. Josh Hawley, R-Missouri, to investigate.\u003c/p>\n\u003cp>\u003c/p>\u003c/div>",
"attributes": {
"named": {},
"numeric": []
}
},
{
"type": "component",
"content": "",
"name": "ad",
"attributes": {
"named": {
"label": "fullwidth"
},
"numeric": [
"fullwidth"
]
}
},
{
"type": "contentString",
"content": "\u003cdiv class=\"post-body\">\u003cp>\u003c/p>\n\u003cp>“We made ChatGPT pretty restrictive to make sure we were being careful with mental health issues. We realize this made it less useful/enjoyable to many users who had no mental health problems, but given the seriousness of the issue, we wanted to get this right,” Altman wrote.\u003c/p>\n\u003cp>The announcement came less than two months after the company was sued by the parents of Adam Raine, a teenager who \u003ca href=\"https://www.kqed.org/news/12054490/child-safety-groups-demand-mental-health-guardrails-after-california-teens-suicide-using-chatgpt\">died by suicide\u003c/a> earlier this year, for ChatGPT allegedly providing him with specific advice on how to kill himself — setting off a firestorm of news coverage and comment.\u003c/p>\n\u003cfigure id=\"attachment_11989313\" class=\"wp-caption aligncenter\" style=\"max-width: 2000px\">\u003cimg loading=\"lazy\" decoding=\"async\" class=\"size-full wp-image-11989313\" src=\"https://cdn.kqed.org/wp-content/uploads/sites/10/2024/06/GettyImages-2155035557-scaled-e1760733694503.jpg\" alt=\"\" width=\"2000\" height=\"1334\">\u003cfigcaption class=\"wp-caption-text\">The OpenAI ChatGPT logo. \u003ccite>(Jaap Arriens/NurPhoto via Getty Images)\u003c/cite>\u003c/figcaption>\u003c/figure>\n\u003cp>Altman delivered \u003ca href=\"https://x.com/sama/status/1978539332215681076\">a follow-up\u003c/a> on Wednesday. “We will still not allow things that cause harm to others, and we will treat users who are having mental health crises very different from users who are not … But we are not the elected moral police of the world. 
In the same way that society differentiates other appropriate boundaries (R-rated movies, for example), we want to do a similar thing here,” Altman wrote, although it remains unclear whether OpenAI will extend erotica to its AI voice, image and video generation tools.\u003c/p>\n\u003cp>“Comparing content moderation of chatbot interactions with movie ratings is not really useful,” wrote Irina Raicu, director of the Internet Ethics program at the Markkula Center for Applied Ethics at Santa Clara University. “It downplays both the nature and the extent of the problems that we’re seeing when people get more and more dependent on and influenced by chatbot ‘relationships.’”\u003c/p>\n\u003cp>Mark Cuban, the entrepreneur, investor and media personality, argued much the same in a string of \u003ca href=\"https://x.com/mcuban/status/1978317936336028016\">posts on X\u003c/a>.\u003c/p>\n\u003cp>“I don’t see how OpenAI can age-gate successfully enough. I’m also not sure that it can’t psychologically damage young adults. We just don’t know yet how addictive LLMs can be. Which, in my OPINION, means that parents and schools, that would otherwise want to use ChatGPT because of its current ubiquity, will decide not to use it,” Cuban wrote.\u003c/p>\u003c/div>",
"attributes": {
"named": {},
"numeric": []
}
},
{
"type": "component",
"content": "",
"name": "aside",
"attributes": {
"named": {
"postid": "news_12059714",
"hero": "https://cdn.kqed.org/wp-content/uploads/sites/10/2025/08/GavinNewsomAISF1.jpg",
"label": ""
},
"numeric": []
}
},
{
"type": "contentString",
"content": "\u003cdiv class=\"post-body\">\u003cp>Others see the drive for paying subscribers and increased profit behind the move. As a private company, OpenAI does not release its shareholder reports publicly. However, \u003ca href=\"https://www.bloomberg.com/news/articles/2025-10-02/openai-completes-share-sale-at-record-500-billion-valuation?accessToken=eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJzb3VyY2UiOiJTdWJzY3JpYmVyR2lmdGVkQXJ0aWNsZSIsImlhdCI6MTc2MDcxODQwMSwiZXhwIjoxNzYxMzIzMjAxLCJhcnRpY2xlSWQiOiJUM0hLMkNHUFdDSEIwMCIsImJjb25uZWN0SWQiOiJBM0VCRjM5ODM4RDc0RDI4QUJDREM4MDZDMDA5RTVBMiJ9.ADGZysjoeNVhUDWXwiuAxieyKueee-676dgJIAM9BvQ\">Bloomberg\u003c/a> recently reported that OpenAI has completed a deal to help employees sell shares in the company at a $500 billion valuation. According to Altman, ChatGPT is already used by \u003ca href=\"https://techcrunch.com/2025/10/06/sam-altman-says-chatgpt-has-hit-800m-weekly-active-users/\">800 million weekly active users\u003c/a>. With so much investment at stake, OpenAI is under pressure to grow its subscriber base. The company has also raised billions of dollars for a historic infrastructure buildout, an investment OpenAI eventually needs to pay back.\u003c/p>\n\u003cp>“It is no secret that sexual content is one of the most popular and lucrative aspects of the internet,” wrote Jennifer King, a privacy and data policy fellow at the Stanford University Institute for Human-Centered Artificial Intelligence. She noted that nearly 20 U.S. 
states have passed laws \u003ca href=\"https://www.axios.com/2025/01/16/adult-website-age-verification-states\">requiring age verification for online adult content\u003c/a> sites.\u003c/p>\n\u003cp>“By openly embracing business models that allow access to adult content, mainstream providers like OpenAI will face the burden of demonstrating that they have robust methods for excluding children under 18 and potentially adults under the age of 21,” King said.\u003c/p>\n\u003cp>AI chatbots appear to be going the way of social media, said California Assemblymember Rebecca Bauer-Kahan, D-San Ramon, whose bill that would have required child safety guardrails for companion chatbots was \u003ca href=\"https://www.kqed.org/news/12059714/newsom-vetoes-most-watched-childrens-ai-bill-signs-16-others-targeting-tech\">vetoed earlier this week\u003c/a>.\u003c/p>\n\u003cfigure id=\"attachment_11802216\" class=\"wp-caption aligncenter\" style=\"max-width: 1920px\">\u003cimg loading=\"lazy\" decoding=\"async\" class=\"size-full wp-image-11802216\" src=\"https://cdn.kqed.org/wp-content/uploads/sites/10/2020/02/RS41373_IMG_0396-qut.jpg\" alt=\"Assemblymember Rebecca Bauer-Kahan says local jurisdictions need the power to stop a wildfire disaster before it starts. 
The assemblymember and other state lawmakers announced a bill to expand enforcement actions against PG&E and other utilities on February 18, 2020.\" width=\"1920\" height=\"1440\" srcset=\"https://cdn.kqed.org/wp-content/uploads/sites/10/2020/02/RS41373_IMG_0396-qut.jpg 1920w, https://cdn.kqed.org/wp-content/uploads/sites/10/2020/02/RS41373_IMG_0396-qut-160x120.jpg 160w, https://cdn.kqed.org/wp-content/uploads/sites/10/2020/02/RS41373_IMG_0396-qut-800x600.jpg 800w, https://cdn.kqed.org/wp-content/uploads/sites/10/2020/02/RS41373_IMG_0396-qut-1020x765.jpg 1020w, https://cdn.kqed.org/wp-content/uploads/sites/10/2020/02/RS41373_IMG_0396-qut-1832x1374.jpg 1832w, https://cdn.kqed.org/wp-content/uploads/sites/10/2020/02/RS41373_IMG_0396-qut-1376x1032.jpg 1376w, https://cdn.kqed.org/wp-content/uploads/sites/10/2020/02/RS41373_IMG_0396-qut-1044x783.jpg 1044w, https://cdn.kqed.org/wp-content/uploads/sites/10/2020/02/RS41373_IMG_0396-qut-632x474.jpg 632w, https://cdn.kqed.org/wp-content/uploads/sites/10/2020/02/RS41373_IMG_0396-qut-536x402.jpg 536w\" sizes=\"auto, (max-width: 1920px) 100vw, 1920px\">\u003cfigcaption class=\"wp-caption-text\">Assemblymember Rebecca Bauer-Kahan on Feb. 18, 2020. \u003ccite>(Eli Walsh/Bay City News)\u003c/cite>\u003c/figcaption>\u003c/figure>\n\u003cp>“My fear is that we are on a path to creating the next, frankly, more addictive, more harmful version of social media for our children,” Bauer-Kahan told KQED. 
“I do not think that the addictive features in these chatbots that result in our children having relationships with a chatbot instead of their fellow humans is a positive thing, and the experts \u003ca href=\"https://cdt.org/insights/hand-in-hand-schools-embrace-of-ai-connected-to-increased-risks-to-students/\">confirm that\u003c/a>.”\u003c/p>\n\u003cp>OpenAI did not comment for this story, but the company has written that it’s \u003ca href=\"https://openai.com/index/teen-safety-freedom-and-privacy/\">working\u003c/a> on an under-18 version of ChatGPT, which will redirect minors to age-appropriate content. A couple of weeks ago, OpenAI announced it’s rolling out safety features for minors, including an age prediction system and a way for \u003ca href=\"https://openai.com/index/introducing-parental-controls/\">parents\u003c/a> to control their teens’ ChatGPT accounts. This week, OpenAI announced the formation of \u003ca href=\"https://openai.com/index/expert-council-on-well-being-and-ai/\">an expert council \u003c/a>of mental health professionals to advise the company on well-being and AI.\u003c/p>\n\u003cp>In mid-September, the Federal Trade Commission launched an \u003ca href=\"https://www.ftc.gov/news-events/news/press-releases/2025/09/ftc-launches-inquiry-ai-chatbots-acting-companions\">inquiry\u003c/a> into seven AI chatbot developers, including xAI, Meta and OpenAI, “seeking information on how these firms measure, test, and monitor potentially negative impacts of this technology on children and teens.”\u003c/p>\n\u003cp>For the most part, a couple of dozen \u003ca href=\"https://techcrunch.com/2025/09/06/the-growing-debate-over-expanding-age-verification-laws/\">states\u003c/a> and their \u003ca href=\"https://oag.ca.gov/system/files/attachments/press-docs/AI%20Chatbot_FINAL%20%2844%29.pdf\">attorneys general\u003c/a> have taken the lead on regulation, enacting measures like age verification and requiring many online platforms to verify users’ identities 
before granting access. East Bay Assemblymember Buffy Wicks won the \u003ca href=\"https://a14.asmdc.org/press-releases/20250909-google-meta-among-tech-leaders-and-child-advocates-voicing-support-wicks\">support of major tech\u003c/a> companies for her measure, \u003ca href=\"https://a14.asmdc.org/press-releases/20250602-asm-wicks-bill-protect-kids-online-passes-assembly-bipartisan-support\">AB 1043\u003c/a>, which was just signed into law by Gov. Gavin Newsom.\u003c/p>\n\u003cp>But any parent knows it’s easy for children to sidestep those controls, or reach out to older siblings or friends who can help them, Bauer-Kahan said. She said she sees a coincidence in the fact that the veto of her toughest bill was announced on Monday, and Altman’s announcement was posted on Tuesday.\u003c/p>\n\u003cp>“Here was a bill that was really requiring very clear, safe-by-design AI for children with real liability. And I think that was further than the industry wanted California to go. I just found the timing of the veto and then this announcement about access to erotica too coincidental not to call out,” she said.\u003c/p>\n\u003cp>\u003c/p>\u003c/div>",
"attributes": {
"named": {},
"numeric": []
}
},
{
"type": "component",
"content": "",
"name": "ad",
"attributes": {
"named": {
"label": "floatright"
},
"numeric": [
"floatright"
]
}
},
{
"type": "contentString",
"content": "\u003cdiv class=\"post-body\">\u003cp>\u003c/p>\n\u003c/div>\u003c/p>",
"attributes": {
"named": {},
"numeric": []
}
}
],
"link": "/news/12060365/chatgpt-will-soon-allow-adults-to-generate-erotica-is-this-the-future-we-want",
"authors": [
"251"
],
"categories": [
"news_8",
"news_13",
"news_248"
],
"tags": [
"news_25184",
"news_32664",
"news_34755",
"news_32668",
"news_29886",
"news_2109",
"news_33542",
"news_22456",
"news_33543",
"news_38",
"news_34586",
"news_1631",
"news_21121",
"news_20385"
],
"featImg": "news_12060375",
"label": "news"
},
"news_12059911": {
"type": "posts",
"id": "news_12059911",
"meta": {
"index": "posts_1716263798",
"site": "news",
"id": "12059911",
"score": null,
"sort": [
1760522453000
]
},
"guestAuthors": [],
"slug": "beyond-the-ai-hype-machine",
"title": "Beyond the AI Hype Machine",
"publishDate": 1760522453,
"format": "audio",
"headTitle": "Beyond the AI Hype Machine | KQED",
"labelTerm": {},
"content": "\u003cp>\u003ca href=\"#episode-transcript\">\u003ci>View the full episode transcript.\u003c/i>\u003c/a>\u003c/p>\n\u003cp>\u003cspan style=\"font-weight: 400\">When ChatGPT launched in 2022, it kicked off what some have called the “AI hype machine” — a frenzy of promotion and investment that has sent some tech companies’ valuations soaring to record heights. Meanwhile, computational linguist Emily M. Bender and AI researcher and sociologist Alex Hanna have proudly worn the titles of “AI hype busters,” critiquing the industry’s loftiest claims and pointing out the real-world harms behind this wave of excitement. What began as a satirical podcast is now a book, \u003c/span>\u003ca href=\"https://thecon.ai/\">\u003ci>\u003cspan style=\"font-weight: 400\">The AI Con\u003c/span>\u003c/i>\u003c/a>\u003cspan style=\"font-weight: 400\">:\u003c/span>\u003ci>\u003cspan style=\"font-weight: 400\"> How to Fight Big Tech’s Hype and Create the Future We Want\u003c/span>\u003c/i>\u003cspan style=\"font-weight: 400\">. In this episode, Alex and Emily explain why the very term “AI” is misleading, how AI boosters and doomers are really flip sides of the same coin, and why we should question the AI inevitability narrative. 
\u003c/span>\u003c/p>\n\u003cp>\u003c!-- iframe plugin v.4.3 wordpress.org/plugins/iframe/ -->\u003cbr>\n\u003ciframe loading=\"lazy\" frameborder=\"0\" height=\"200\" scrolling=\"no\" src=\"https://playlist.megaphone.fm/?e=KQINC5696998106\" width=\"100%\" class=\"iframe-class\">\u003c/iframe>\u003c/p>\n\u003cp>\u003cspan style=\"font-weight: 400\">Guests: \u003c/span>\u003c/p>\n\u003cul>\n\u003cli>\u003ca href=\"https://bsky.app/profile/emilymbender.bsky.social\">\u003cspan style=\"font-weight: 400\">Emily Bender\u003c/span>\u003c/a>\u003cspan style=\"font-weight: 400\">, professor of linguistics at the University of Washington\u003c/span>\u003c/li>\n\u003cli>\u003ca href=\"https://bsky.app/profile/alexhanna.bsky.social\">Alex Hanna\u003c/a>, director of research at the Distributed AI Research Institute\u003c/li>\n\u003c/ul>\n\u003cp>\u003cspan style=\"font-weight: 400\">Further reading/listening: \u003c/span>\u003c/p>\n\u003cul>\n\u003cli>\u003ca href=\"https://thecon.ai/\">\u003cspan style=\"font-weight: 400\">The AI Con: How to Fight Big Tech’s Hype and Create the Future We Want\u003c/span>\u003c/a>\u003cspan style=\"font-weight: 400\"> — Emily Bender and Alex Hanna\u003c/span>\u003c/li>\n\u003cli>\u003ca href=\"https://www.dair-institute.org/maiht3k/\">\u003cspan style=\"font-weight: 400\">The Mystery AI Hype Theater 3000 Podcast\u003c/span>\u003c/a>\u003cspan style=\"font-weight: 400\"> — Emily M. 
Bender and Alex Hanna\u003c/span>\u003c/li>\n\u003cli>\u003ca href=\"https://www.techpolicy.press/ai-hurts-consumers-and-workers-and-isnt-intelligent/\">\u003cspan style=\"font-weight: 400\">“AI” Hurts Consumers and Workers — and Isn’t Intelligent\u003c/span>\u003c/a>\u003cspan style=\"font-weight: 400\"> — Emily Bender and Alex Hanna, \u003c/span>\u003ci>\u003cspan style=\"font-weight: 400\">Tech Policy Press\u003c/span>\u003c/i>\u003c/li>\n\u003cli>\u003ci>\u003c/i>\u003ca href=\"https://lithub.com/on-the-very-real-dangers-of-the-artificial-intelligence-hype-machine/\">\u003cspan style=\"font-weight: 400\">On the Very Real Dangers of the Artificial Intelligence Hype Machine: Emily M. Bender and Alex Hanna Explore AI History, the Cold War, and a Fatally Overhyped Idea \u003c/span>\u003c/a>\u003cspan style=\"font-weight: 400\">— Emily M. Bender, \u003c/span>\u003ci>\u003cspan style=\"font-weight: 400\">LitHub\u003c/span>\u003c/i>\u003c/li>\n\u003cli>\u003ci>\u003c/i>\u003ca href=\"https://www.404media.co/sora-2-content-violation-guardrails-error/\">\u003cspan style=\"font-weight: 400\">People Are Crashing Out Over Sora 2’s New Guardrails\u003c/span>\u003c/a>\u003cspan style=\"font-weight: 400\"> — Samantha Cole, \u003c/span>\u003ci>\u003cspan style=\"font-weight: 400\">404 Media\u003c/span>\u003c/i>\u003c/li>\n\u003cli>\u003ci>\u003c/i>\u003ca href=\"https://futurism.com/artificial-intelligence/sora-2-financial-problem\">\u003cspan style=\"font-weight: 400\">Sora 2 Has a Huge Financial Problem\u003c/span>\u003c/a>\u003cspan style=\"font-weight: 400\"> — Victor Tangermann, \u003c/span>\u003ci>\u003cspan style=\"font-weight: 400\">Futurism\u003c/span>\u003c/i>\u003c/li>\n\u003cli>\u003ci>\u003c/i>\u003ca href=\"https://www.technologyreview.com/2025/05/20/1116327/ai-energy-usage-climate-footprint-big-tech/\">\u003cspan style=\"font-weight: 400\">We did the math on AI’s energy footprint. 
Here’s the story you haven’t heard.\u003c/span>\u003c/a>\u003cspan style=\"font-weight: 400\"> — James O’Donnell and Casey Crownhart, \u003c/span>\u003ci>\u003cspan style=\"font-weight: 400\">MIT Technology Review\u003c/span>\u003c/i>\u003c/li>\n\u003c/ul>\n\u003cp>Want to give us feedback on the show? Shoot us an email at \u003ca href=\"mailto:CloseAllTabs@KQED.org\">CloseAllTabs@KQED.org\u003c/a>\u003c/p>\n\u003cp>\u003ca href=\"https://www.instagram.com/closealltabspod/\">Follow us on Instagram\u003c/a>\u003c/p>\n\u003cp>[ad fullwidth]\u003c/p>\n\u003ch2>\u003c/h2>\n\u003ch2 id=\"episode-transcript\">Episode Transcript\u003c/h2>\n\u003cp>\u003ci>\u003cspan style=\"font-weight: 400\">This is a computer-generated transcript. While our team has reviewed it, there may be errors.\u003c/span>\u003c/i>\u003c/p>\n\u003cp>\u003cb>Morgan Sung: \u003c/b>\u003cspan style=\"font-weight: 400\">A few weeks ago, OpenAI launched an app, Sora. It’s a vertical video social platform, similar to TikTok, except all the videos are generated by the company’s AI image generator, Sora 2. Within days, the app was a copyright infringement nightmare. There were videos of SpongeBob cooking meth, unsanctioned Rick and Morty ads for crypto startups, and many, many videos of open AI CEO Sam Altman doing depraved things to copyrighted characters. Like the one where he brutally barbecues and carves up Pikachu. \u003c/span>\u003c/p>\n\u003cp>\u003ci>\u003cspan style=\"font-weight: 400\">[AI CEO Sam Altman] Pikachu on the grill here. It’s already got a beautiful char and it smells like somebody plugged in a chicken. Let’s give it a flip. I’m gonna carve it into some thick steaks. Look at that. Crust on the outside, pink and juicy in the middle. 
Cheers.\u003c/span>\u003c/i>\u003cspan style=\"font-weight: 400\"> \u003c/span>\u003c/p>\n\u003cp>\u003cb>Morgan Sung: \u003c/b>\u003cspan style=\"font-weight: 400\">All of these 10 second videos require an immense amount of computing power, which is extremely costly to maintain. In a blog post, Sam Altman admitted that the company still needs to figure out how to make money off of Sora. He wrote, “People are generating much more than we expected per user, and a lot of videos are being generated for very small audiences.” Facing heat from copyright holders like Disney and Nintendo, Altman also announced extra guardrails for the app to curb infringement. Now, users are complaining that everything they try to generate using Sora 2 gets flagged as a violation of the copyrighted content policy. They’re already getting bored of the app. This whole cycle has been described as the AI hype machine. Big investments are made based on big promises of innovation, disruption, revolution. This hype fuels more investment, which, in turn, fuels the hype. The cycle continues when a new product launches. Meta, for example, launched its own AI social video app, called Vibes, last month too, which was quickly forgotten about when Sora launched. \u003c/span>\u003c/p>\n\u003cp>\u003cb>Alex Hanna: \u003c/b>\u003cspan style=\"font-weight: 400\">AI hype is effectively premised on fear of missing out. It is the fear that if you don’t get onto this new technology, you are going to be left behind. \u003c/span>\u003c/p>\n\u003cp>\u003cb>Morgan Sung: \u003c/b>\u003cspan style=\"font-weight: 400\">That’s Alex Hanna, a sociologist and the Director of Research at the Distributed AI Research Institute. \u003c/span>\u003c/p>\n\u003cp>\u003cb>Alex Hanna: \u003c/b>\u003cspan style=\"font-weight: 400\">If you’re a corporate manager, you’re going to have your competitors just leave you in the dust. 
If you are a teacher, you are doing a disservice to your students by not preparing them for the job market of the future. If you were a student, you were going to miss out on all the skills and all your classmates are going to be outperforming you. And as a worker, you will be doing things the old way, the analog way, and everyone is going to be outpacing you. \u003c/span>\u003c/p>\n\u003cp>\u003cb>Morgan Sung: \u003c/b>\u003cspan style=\"font-weight: 400\">Alex and her co-author, Emily M. Bender, recently published a book, The AI Con, How to Fight Big Tech’s Hype and Create the Future We Want. Emily runs the computational linguistics program at the University of Washington. This is a field of study that combines human language with machine learning. \u003c/span>\u003c/p>\n\u003cp>\u003cb>Emily M. Bender: \u003c/b>\u003cspan style=\"font-weight: 400\">I often get asked the question, well aren’t you worried that students are going to get left behind? Etc. And my answer to that is often, where is everybody going? Like, this metaphor of left behind suggests that people are running off into some brilliant future. I just don’t see it, you know, setting aside the fact that the technology doesn’t do what it’s being sold to do, but that is overhyped and over promised. The idea that we’d be better off with instead of interacting with people at all stages, interacting with screens that that’s just not the future that I want. \u003c/span>\u003c/p>\n\u003cp>\u003cb>Morgan Sung: \u003c/b>\u003cspan style=\"font-weight: 400\">In this episode, we’re talking about the AI hype machine, when it started, how it’s fed, and why a growing corner of critics say they see right through it. \u003c/span>\u003c/p>\n\u003cp>\u003cspan style=\"font-weight: 400\">This is Close All Tabs. I’m Morgan Sung, tech journalist and your chronically online friend, here to open as many browser tabs as it takes to help you understand how the digital world affects our real lives. Let’s get into it. 
\u003c/span>\u003c/p>\n\u003cp>\u003cspan style=\"font-weight: 400\">All right, like we always do, we’re starting by opening a new tab. What is P-Doom? In their book, The AI Con, Alex and Emily talk about these two groups. There are the AI boosters, the people who are optimistic that AI will pave the way to our utopian future. Then there are the AI doomers: the people that catastrophize, and believe that AI progress will usher in an era of societal collapse and human extinction. It’s very Matrix. \u003c/span>\u003c/p>\n\u003cp>\u003ci>\u003cspan style=\"font-weight: 400\">[Clip from the film “The Matrix”] The Matrix is a system, Neo. That system is our enemy. \u003c/span>\u003c/i>\u003c/p>\n\u003cp>\u003cb>Morgan Sung: \u003c/b>\u003cspan style=\"font-weight: 400\">But before we break this down further, let’s start by defining our terms. Here’s Emily. \u003c/span>\u003c/p>\n\u003cp>\u003cb>Emily M. Bender: \u003c/b>\u003cspan style=\"font-weight: 400\">Artificial intelligence does not refer to a coherent set of technologies, and it has throughout its history, since it was coined by John McCarthy in 1955, basically been used to sell this idea of some magic do-everything technology in order to get money. Initially, it was research funding and then DOD money and now a lot of it is venture capital money. \u003c/span>\u003c/p>\n\u003cp>\u003cb>Alex Hanna: \u003c/b>\u003cspan style=\"font-weight: 400\">Yeah, and the way that this has proliferated in the modern day is that so many things get called AI. So that could be automated decision-making systems used for determining whether someone gets social services. And so that gets looped in, and then we also get recommendation systems, things like the TikTok algorithm, the Instagram Reels algorithm, pick your short-form video. 
But then, it’s really manifest in these large language models and diffusion models that are looped into the category of generative AI.\u003c/span>\u003c/p>\n\u003cp>\u003cb>Morgan Sung: \u003c/b>\u003cspan style=\"font-weight: 400\">You start this book from this one moment in 2023, when Chuck Schumer at the time, the Senate majority leader, held a series of forums around AI. Can you take us back to that moment and like set the scene for us? \u003c/span>\u003c/p>\n\u003cp>\u003cb>Alex Hanna: \u003c/b>\u003cspan style=\"font-weight: 400\">So, late 2023, Chuck Schumer is convening the eighth of total nine Senate Insight forums around AI, and he asks folks, this is very weird, he asks, “what is folks’ probability of doom?” And this is abbreviated as P(doom), and for this instance, it’s an audio platform, that is P, open parentheses, doom, closed parentheses. And he also asked, “what people’s pee hope is.” So this means what is your probability that there’s going to be some kind of a doom scenario, in which through, you know, hook or crook, some kind of thing called AI is going to outperform or outsmart humans and take over and lead to human extinction. And in the book, we start and we say, well, this is the wrong question. But also if you’re looking at harms that are happening in the here and now, there are many that exist, whether that be deep fake porn being made out of non-consensual adults and children, the use of automated decision-making and weapons targeting, especially in Gaza, and then we also talk about students having their exams effectively being judged by these automated tools. So talking about P(doom) in this register is asking the wrong question and focusing on the wrong things. \u003c/span>\u003c/p>\n\u003cp>\u003cb>Emily M. 
Bender: \u003c/b>\u003cspan style=\"font-weight: 400\">But oftentimes it looks like the doomers, the people with a high P(doom) value, the people who take that question seriously in the first place, um, and the boosters, the people who say this is gonna solve all our problems, are like the opposite ends of a spectrum. And that is how these people present themselves, it is how the media often presents what’s going on, and it is very misleading. I think that one of the points that we make is that doomerism is another kind of AI hype, because it’s saying, our system is very powerful. It’s so powerful, it’s going to kill us all, is a way of saying it’s very powerful, but also we make the point that the doomers and the boosters are two sides of the same coin. And it, I think, becomes very clear if you look at it this way, which is to say, the doomers say, “AI is a thing, it’s imminent, it’s inevitable, and it’s gonna kill us all.” And the boosters say, “AI’s a thing, it’s imminent, it’s inevitable, and it is gonna solve all of our problems.” And it’s pretty easy to see these are the same position with just a different twist at the end. \u003c/span>\u003c/p>\n\u003cp>\u003cb>Alex Hanna: \u003c/b>\u003cspan style=\"font-weight: 400\">And the funny thing about this boosterism, doomerism dichotomy is that these are many of the same people or they run in many of same circles. So, you know, there was this document that was put out called AI 2027, in which it ends with humanity dying and the kind of choose your own adventure. There’s only two endings here. The choose your own adventure and one of them, you know everyone dies. But the lead author of this works at OpenAI. And there’s many such cases of people who are working on quote unquote, “AI alignment”, who are in these industries. So, it’s again not as if they’re against the building of AI, or we should just say no, it’s actually a very narrow segment of people. 
\u003c/span>\u003c/p>\n\u003cp>\u003cb>Morgan Sung: \u003c/b>\u003cspan style=\"font-weight: 400\">You described this industry as the AI hype machine, the modern AI hype machine, what does it look like? I mean, who are the players? \u003c/span>\u003c/p>\n\u003cp>\u003cb>Alex Hanna: \u003c/b>\u003cspan style=\"font-weight: 400\">Yeah, I mean, the players are many of the big tech players that we know. So Microsoft, Google, Amazon, Meta, but with some new entrants, OpenAI being the most significant one. Um, and along with OpenAI, a few offshoots, so Anthropic is kind of the most notable one. And then the company that’s creating the shovels for the gold rush, so that’s your Nvidia, and then your Taiwanese semiconductor manufacturing company, abbreviated as TSMC. \u003c/span>\u003c/p>\n\u003cp>\u003cb>Emily M. Bender: \u003c/b>\u003cspan style=\"font-weight: 400\">I want to say that we see AI hype not just originating from those big players, like that is a large source of it. Also we hear over and over and again about people working in various businesses being told by their higher ups that they have to try this new AI thing. And so there’s this sort of secondary promulgation of hype that comes from middle management and up that have been sold on the idea that this is going to, you know, really increase productivity. And, you know, on the one hand, it’s a very useful excuse for doing layoffs that they may have otherwise already wanted to do, but then on the other hand, some people seem to have really bought into the idea. So they tell the people working for them, you have to spend time figuring out how to make yourself more productive by using these so-called AI tools, because everyone’s telling me that that’s the way of the future. 
\u003c/span>\u003c/p>\n\u003cp>\u003cb>Morgan Sung: \u003c/b>\u003cspan style=\"font-weight: 400\">I mean, the obvious way people are, these players, are feeding into the AI hype machine is by extolling the virtues of AI, or, you know, kind of spreading this very doomerous sci-fi rhetoric. But what other strategies are being used to feed this machine? \u003c/span>\u003c/p>\n\u003cp>\u003cb>Emily M. Bender: \u003c/b>\u003cspan style=\"font-weight: 400\">So one important strategy is what I sometimes call citations to the future. So people will say, yeah, yeah. It’s got problems now, but it’s going to do all of these things. And I think it really is the only technology that we are expected to evaluate based on promises of what it will be doing, right? That car that I just bought only gets, you know, 35 miles to the gallon. But that’s OK, because the later one’s going to get 50. We don’t talk about it that way, except with the so-called AI technology.\u003c/span>\u003c/p>\n\u003cp>So, citations to the future is one big strategy and another one is anthropomorphizing language, talking about things that have happened as if the computer systems themselves did it of their own volition and autonomously instead of people having used the system to do it or done something in order to build the system. So it’ll be something like, AI needs lots and lots of data. Well, no, people who want to build the system that they’re calling AI are amassing lots and lots of data in order to build them, or AI is thirsty, it needs lots of water, or AI was able to identify, you know, something in a blurry image. It’s like — in no sense, right? \u003c/p>\n\u003cp>\u003cspan style=\"font-weight: 400\">People used XYZ tool in order to do a thing, or in order to build these tools, they are using lots of water and so on. So this anthropomorphizing language sort of shifts the people out of the frame and hides a bunch of accountability, and at the same time, makes the systems sound cooler than they are. 
\u003c/span>\u003c/p>\n\u003cp>\u003cb>Morgan Sung: \u003c/b>\u003cspan style=\"font-weight: 400\">Alex and Emily also pointed out that players in the AI industry push this adoption of AI into our everyday lives by really trying to humanize the product. We’re gonna dive into that in a new tab. First, a quick break. \u003c/span>\u003c/p>\n\u003cp>\u003cspan style=\"font-weight: 400\">Time for a new tab! Are we really just meat machines?\u003cbr>\n\u003c/span>\u003c/p>\n\u003cp>\u003cspan style=\"font-weight: 400\">Let’s talk about the technology itself, like the way people talk about large language models as AI, um, ChatGPT, Claude, Grok. Many people understand that these models are basically predicting the words that most often go together. But can you break it down further? Like, what’s really going on under the hood there? \u003c/span>\u003c/p>\n\u003cp>\u003cb>Emily M. Bender: \u003c/b>\u003cspan style=\"font-weight: 400\">So, the first very important lesson is that when we say word, we’re actually talking about two things. We’re talking about the way the word is spelled and pronounced and what it is used to mean. And one thing that makes that hard to keep in mind is that as proficient speakers of the languages we speak, pretty much anytime we encounter the spelling or sound of a word, we are also encountering what the person using it is using it to talk about. And so we always experience the form and meaning together. But a language model, so that the core component of something like Gemini or Grok or Claude or ChatGPT is literally a system for modeling which bits of words go with which other bits of words in whatever the input collection of text was to create that model. And so what we have are models that are very good at putting literally like spellings of parts of words next to each other in a way that looks like something somebody might say. 
\u003c/span>\u003c/p>\n\u003cp>\u003cb>Morgan Sung: \u003c/b>\u003cspan style=\"font-weight: 400\">Emily and Alex have come up with a few phrases that illustrate what large language models really are, which also describe the limitations of this tech. We’ve got synthetic text extruding machine. \u003c/span>\u003c/p>\n\u003cp>\u003cb>Emily M. Bender: \u003c/b>\u003cspan style=\"font-weight: 400\">The choice of the word extrude is very intentional because it’s a little gross. \u003c/span>\u003c/p>\n\u003cp>\u003cb>Morgan Sung: \u003c/b>\u003cspan style=\"font-weight: 400\">Racist pile of linear algebra. Spicy autocomplete. And one phrase that really took off, stochastic parrot. Emily coined the phrase in a research paper she co-authored in 2020. Parrots can mimic human speech, but whether they can really comprehend it, that’s dubious. Stochastic comes from probability theory. It means randomly determined. So a stochastic parrot essentially mimics language in a random order and does so convincingly, but it doesn’t understand it. \u003c/span>\u003c/p>\n\u003cp>\u003cb>Emily M. Bender: \u003c/b>\u003cspan style=\"font-weight: 400\">Starting with OpenAI’s GPT-2 and GPT3, they were using it to create synthetic text. And so one of the things we worried about in that paper is what happens if someone comes across synthetic text and doesn’t know that it was synthetic? What we didn’t realize at the time is that people would be happy to look at synthetic text while knowing that it’s synthetic. That is very surprising to me. And so the phrase stochastic parrots was this attempt to make vivid what’s going on, to help people understand why the output of a language model run to repeatedly answer the question, what’s a likely next word, is not the same thing as text produced by a person or group of people with something to communicate. 
And what’s happened, it’s been fascinating as a linguist to watch that phrase go out into the world, so for the first little while, it was people referring to the paper, and then it sort of became people talking about, um, that claim that large language models are not understanding, they’re just repeatedly predicting a likely next word. And then it got picked up or interpreted as an insult, which is surprising to me because in order for it to be an insult, the thing that it’s being applied to would have to be the kind of thing that could be insulted. \u003c/span>\u003c/p>\n\u003cp>\u003cb>Morgan Sung: \u003c/b>\u003cspan style=\"font-weight: 400\">Then in 2022, Sam Altman tweeted, I am a stochastic parrot and so are you. \u003c/span>\u003c/p>\n\u003cp>\u003cb>Emily M. Bender: \u003c/b>\u003cspan style=\"font-weight: 400\">I think what happens when Sam Altman picks it up and tweets that is that it is, on the one hand, sort of an attempt to reclaim what is understood as an insult or slur by people in that mindset, but also, and very importantly, it is about minimizing what it is to be human, so that he can claim that the system that he’s built is as good as a person.\u003c/span>\u003c/p>\n\u003cp>\u003cb>Morgan Sung: \u003c/b>\u003cspan style=\"font-weight: 400\">Emily and Alex say this concept of comparing humans to, essentially, flesh machines is a classic move in the AI hype machine playbook. It’s reducing humanity and what it means to be human to programming, like Eliza in the 60s. Eliza was an early natural language processing program designed to mimic a therapist. Think of it as a great, great, great, grand chatbot of ChatGPT. A lot of people, from academics to government leaders to tech industry giants, bought into the Eliza hype. And that freaked out Eliza’s own creator, Joseph Weizenbaum. In a book he published in the 70s, Weizenbaum warned that machines would never be able to make the same decisions that humans make because they don’t have human empathy. 
His criticism of AI caused a stir in the research community. And decades later, AI boosters are still making that same claim. That humans and machines aren’t that different. But what does this devaluing of humanity really mean for us? \u003c/span>\u003c/p>\n\u003cp>\u003cb>Alex Hanna: \u003c/b>\u003cspan style=\"font-weight: 400\">Yeah, I mean, it means a lot of things. It really seems to emphasize that there is, kind of, aspects of human behavior that can just be reduced to our observable outputs, right? Humans are just things that output language or output actions, when that’s not true. Humans have a much more vivid internal life. Um, we think about others. Uh, we think about, kind of, co-presence, but it’s more about saying how we’re comparing ourselves to machines that are programmed by people and those people in those institutions have particular types of incentives to make machines that behave as such. So that’s the kind of implications that it has and it also has the implications of other kinds of moves into humanism, dehumanization and what that does and how we treat people and with regards to dignity and propriety of rights. \u003c/span>\u003c/p>\n\u003cp>\u003cb>Morgan Sung: \u003c/b>\u003cspan style=\"font-weight: 400\">Can you also give concrete examples of where we see this kind of, uh, devaluing of humans? \u003c/span>\u003c/p>\n\u003cp>\u003cb>Emily M. Bender: \u003c/b>\u003cspan style=\"font-weight: 400\">So I think if we say that humans can be reduced to their outputs, that that leads to lots of problems. And one is we end up saying, you know, the form of, or the words that teachers and students say in the classroom is the learning situation. And so we can replace the teacher with a system for outputting words and then those students will get as much and maybe it’ll be personalized and it’ll better. 
And that is dehumanizing to teachers clearly and also to students because it removes, you know, everything that is about the student and teacher’s internal life and about their relationship and about their community from the situation. But I think it’s also really important in terms of the workforce more generally, that basically if we say, well, humans like large language models are systems for outputting words, then it’s a very small step to basically saying the whole value of this person is how many words they can output and doing a very, very dehumanizing work environment to people. \u003c/span>\u003c/p>\n\u003cp>\u003cb>Alex Hanna: \u003c/b>\u003cspan style=\"font-weight: 400\">We also see this in other domains like the Amazon work floor and the ways that these mini robots flit from place to place and the so-called quote unquote pickers. People on Amazon work warehouses have to pick things and then deliver them. So there’s a lot of implications for that and I think also in seeing the humanity in other folks and how we treat other folks. You know, if they’re merely meat machines, then what does it say about how we view them with respect to, kind of, personal rights and human rights and what kind of rights they should be afforded? \u003c/span>\u003c/p>\n\u003cp>\u003cb>Morgan Sung: \u003c/b>\u003cspan style=\"font-weight: 400\">This idea of human beings just being walking meat machines is chilling. It definitely creeped me out. What are the other real world consequences of this thinking? Let’s open a new tab. Who’s really harmed by AI hype? Alex and Emily have said that their goal with writing the AI con is to reduce the harm caused by AI hype. Automation, for example, doesn’t just replace jobs. Healthcare providers are increasingly relying on AI products for medical triage to decide which patients to see first. Free legal representation, a guaranteed right in criminal cases, can be replaced by a lawyer using a chat bot. 
All of this potentially lowers the quality of these services. And introduces bias into these systems. Artists and other creatives, meanwhile, are struggling to make ends meet as AI generators, sometimes trained on their own work, are used as a cheaper, faster alternative. And then there’s how large language models are disrupting our whole information ecosystem. \u003c/span>\u003c/p>\n\u003cp>\u003cb>Alex Hanna: \u003c/b>\u003cspan style=\"font-weight: 400\">There’s a metaphor we use in the book, the idea that information is being output from these models and results in information ecosystem spills, like toxic spills that really can’t be cleaned up. There’s not really a reliable way to detect synthetic text. And so you’re having to deal with and navigate and try to understand whether something on the internet is actually reflective of truth claims that are being made and perhaps researched more deeply by human individuals. \u003c/span>\u003c/p>\n\u003cp>\u003cb>Morgan Sung: \u003c/b>\u003cspan style=\"font-weight: 400\">You’ve written that the strongest critiques against AI boosterism come from black, brown, poor, queer, and disabled scholars and activists. Can you talk about some examples of these critiques and why these groups specifically are so uniquely positioned to make them? \u003c/span>\u003c/p>\n\u003cp>\u003cb>Alex Hanna: \u003c/b>\u003cspan style=\"font-weight: 400\">So we wrote about that in the register of thinking about the ways in which systems, in here, I want to say data-driven systems, not just large language models, but even different systems just don’t work for black, brown communities, queer, and trans people, and then people like refugees and people on the move. The kind of pioneering work of Drs. 
Timnit Gebru and Joy Buolamwini in their paper Gender Shades talks about facial analysis systems, specifically the way that facial analysis systems do very poorly on darker-skinned women and that there’s a huge delta between darker-skinned women and lighter-skinned men. Sasha Costanza-Chock talks about how tools like TSA scanners do very poorly on trans people. Typically flagging genitals as anomalies or chest areas as anomalies, and then the kind of disparities of how systems talk about women. So there’s been a few papers talking about the ways in which different tools, in this case a word embedding space, makes associations between people and occupation. So, man is to doctor, women is to… typically, the completion is nurse, so it makes presuppositions of this. \u003c/span>\u003c/p>\n\u003cp>\u003cb>Alex Hanna: \u003c/b>\u003cspan style=\"font-weight: 400\">All of this stuff effectively happens in large-language models [laughter] and happens in image generation models as well. There’s some great research by the Bloomberg data team that shows that if you input something like a nurse, uh, typically or a housekeeper, it outputs a kind of a phenotypically looking darker-skinned woman. If you type in CEO, white man. And so those kinds of elements are the bias element of it. \u003c/span>\u003c/p>\n\u003cp>\u003cb>Emily M. Bender: \u003c/b>\u003cspan style=\"font-weight: 400\">Ruha Benjamin sums it up really nicely in this beautiful essay called The New Artificial Intelligentsia that appeared in the LA Review of Books in 2024. And she’s talking about these ideas of transhumanism and merging with the machines. She says this zealous desire to transcend humanity ignores the fact that we have not all had the chance to be fully human. 
My interpretation of what she’s saying is that the people that society does not accord full humanity to have a very different experience of technology, both in the ways, as Alex is saying, it’s being used on them, in the ways that doesn’t work well for them and just in the way that it intrudes on their life. And so people who have the privilege of not experiencing any of that tend to be less sensitized to what’s going on and to have a less informed perspective. \u003c/span>\u003c/p>\n\u003cp>\u003cb>Morgan Sung: \u003c/b>\u003cspan style=\"font-weight: 400\">And this less-informed perspective encourages AI boosters, who continue to fuel the hype machine. This means investing in and launching new products at a breakneck pace, often overlooking the real-world impact. The MIT Technology Review recently reported that generating one 5-second AI video uses about 3.4 million joules, the equivalent of running a microwave for over an hour. At scale this amount of energy consumption is devastating for the environment. And running all of this comes at a steep price for AI companies, too. \u003c/span>\u003c/p>\n\u003cp>\u003cspan style=\"font-weight: 400\">Like we talked about earlier, OpenAI’s Sora app is proving to be wildly expensive, with more users generating videos than actually watching them. And after the copyright fiasco and subsequent new guardrails, it seems like some initial adopters are already moving on. Can the hype machine sustain this kind of frenzied investment with such limited return? \u003c/span>\u003c/p>\n\u003cp>\u003cspan style=\"font-weight: 400\">Okay, we’re opening one last tab. Is the hype machine breaking? \u003c/span>\u003c/p>\n\u003cp>\u003cspan style=\"font-weight: 400\">Do you think the AI hype bubble is going to burst? I mean, like, are there economic critiques? You’ve heard the social ones, but is there anything pointing to the AI hype bubble possibly at least deflating? 
\u003c/span>\u003c/p>\n\u003cp>\u003cb>Alex Hanna: \u003c/b>\u003cspan style=\"font-weight: 400\">Yeah, well, the problem is that there’s so much capital expenditure going into building things like data centers, and they’re going into these massive data center build out where, you know, the kind of projections and how much OpenAI, Microsoft, Google, Amazon, and Meta are spending on this all is astronomical. I mean, hundreds of billions of dollars, just some of the largest technological infrastructure projects that we’ve ever seen. \u003c/span>\u003c/p>\n\u003cp>\u003cspan style=\"font-weight: 400\">At the same time, OpenAI again, the company that has the most queries to Chat GPT, people using most of its products, is making revenue on the order of maybe $10 billion a year. So it’s just orders of magnitude less. And the kind of metaphor that’s being used as well, we have to build the railroads first, and then once the railroads get going, we can put rail cars in them. But that metaphor doesn’t work at all. People are already using the product. And, you know, companies are already saying, we’re not getting a lot of value out of this. You know, there was an, something that was coming out of MIT, which said 95% of companies just haven’t really gained value from quote unquote AI. So what’s happening? This is very bubble shaped, you know, and I don’t know how the story ends, but it’s very alarming that these four to seven companies are propping up the US and world economy right now, so what happens when the bubble deflates or bursts, it’s not going to be good. \u003c/span>\u003c/p>\n\u003cp>\u003cb>Morgan Sung: \u003c/b>\u003cspan style=\"font-weight: 400\">Like you said, um, you finished this book in September 2024. The AI industry has only grown since then. What have you learned about the state of the AI hype machine from the reception to your book? \u003c/span>\u003c/p>\n\u003cp>\u003cb>Emily M. 
Bender: \u003c/b>\u003cspan style=\"font-weight: 400\">I would say what I’ve learned the most about is about the resilience of people and the importance of connection and community. So the antidote to the hype is a variety of things, one is ridicule as praxis, as we say in the book, and also solidarity and labor movements, but also just sort of connection. And one form of that connection is that there’s a lot of people who are, who feel isolated in a workplace or a social circle where everyone around them seems just completely gaga for this technology and they’re the odd one out. And so one of the joys of both our podcasts and this book has been to find those people and be found by those people who say, oh, so glad I’m not the only one. And then they can form community with other people who have the same reaction and I think that that is super important. \u003c/span>\u003c/p>\n\u003cp>\u003cb>Morgan Sung: \u003c/b>\u003cspan style=\"font-weight: 400\">One of the things we grapple a lot just like within Close All Tabs is where to draw the line with AI use, you know. And again, that’s complicated. What is AI? For example, we don’t use ChatGPT, but we use an AI transcription tool for our interviews. Are there conditions under which using large language models, AI tools, are reasonable or justified, appropriate? And then what’s your message to the average listener who maybe uses ChatGPT in their daily but they’re not necessarily AI boosters and not necessarily AI doomers. \u003c/span>\u003c/p>\n\u003cp>\u003cb>Emily M. Bender: \u003c/b>\u003cspan style=\"font-weight: 400\">Yeah. Um, so to the first question, I would say I never call it an AI transcription tool. I would say automatic transcription, right? And that is a use case where, you know, you want to look at the labor conditions of the people who produced it, where the training data come from. And it’s also a use case where you are well positioned to check the output and see if it’s working well for you, right. 
You’ve got something that has been recorded, you’ve got an automatically produced transcript, you’re presumably going through and correcting it. And if it is wrong all the time, or if you have one that is particularly bad for non-Anglo names, for example, you might start looking for something that’s better. So that is a case of automation that I think can be okay. You still want to look into who produced it. Are there privacy implications? Can I use this tool without uploading my data to somebody else and so on? But there’s reasonable uses and reasonable ways to produce automatic transcription. \u003c/span>\u003c/p>\n\u003cp>\u003cspan style=\"font-weight: 400\">If we’re talking about chat bots of the form of ChatGPT, I don’t see reasonable use cases there. And partially we know that the labor and environmental costs are extraordinarily high, that this is not produced ethically. But even setting that aside, every time you turn to ChatGPT for information, you’re cutting yourself off from important sense-making. \u003c/span>\u003c/p>\n\u003cp>\u003cspan style=\"font-weight: 400\">One of the examples I like to use, if you think about an old fashioned search engine that gave you back, you know, the 10 blue links and you’ve got a medical query, what might come back in those links is a link to, you know something like the Mayo Clinic and then your regional university medical center, so in the Bay area, you know UCSF. And you might get a link to Dr. Oz’s page and you might get a link to a discussion forum where people with the same medical questions are talking to each other. And you can then look at those and understand the information that’s there based on what you know about the Mayo Clinic and UCSF and Dr. Oz and discussion forums. But that also helps you continue to update what you know, about those kinds of sites. 
\u003c/span>\u003c/p>\n\u003cp>\u003cspan style=\"font-weight: 400\">Whereas if you asked a chatbot and you got back something that was just sort of some paper mache made up out of some combination of what’s in those sites, you not only don’t know how to contextualize what you’ve seen, but you’re also cut off from ability to continue to understand the information environment. And then very importantly, if you think about that discussion forum, any given, you know, sentence from that discussion forum interpreted as information, you’re going to want to take with a big grain of salt. But the chance to connect with people who are going through the same medical journey is priceless. And there’s a, the scholar Chris Gilliard describes these technologies as technologies of isolation. And I think it’s really important to think about anytime you might turn to a chat bot- what would you have done three years ago? What would you have done when ChatGPT was not in your world and what are you missing out on by not doing that now? The connections that you would make with people, the ongoing maintenance of relationships, the building of community, the deeper sense of what’s going on in the world around you, all of these are precious and I think not to be thrown away for the semblance of convenience. \u003c/span>\u003c/p>\n\u003cp>\u003cspan style=\"font-weight: 400\">And then I think the final thing that I would say is look out for, identify, and reject the inevitability narrative. So the tech companies would like us to believe that AI is the future, it’s definitely coming. Even if you don’t like it, you have to resign yourself to it. And you’ll get people saying, well, it’s here to stay, we have to learn what to live with it. And I refuse that. I say that is also a bid to steal our agency because the future is not written. \u003c/span>\u003c/p>\n\u003cp>\u003cb>Morgan Sung: \u003c/b>\u003cspan style=\"font-weight: 400\">Those are all my questions. 
Thank you so much for joining us.\u003cbr>\n\u003c/span>\u003cb>\u003c/b>\u003c/p>\n\u003cp>\u003cb>Emily M. Bender: \u003c/b>\u003cspan style=\"font-weight: 400\">Yeah, thank you. \u003c/span>\u003c/p>\n\u003cp>\u003cb>Alex Hanna: \u003c/b>\u003cspan style=\"font-weight: 400\">It was a pleasure. \u003c/span>\u003c/p>\n\u003cp>\u003cb>Morgan Sung: \u003c/b>\u003cspan style=\"font-weight: 400\">Let’s close all of these tabs. \u003c/span>\u003c/p>\n\u003cp>\u003cspan style=\"font-weight: 400\">Close All Tabs is a production of KQED studios and is reported and hosted by me, Morgan Sung. This episode was produced by Chris Egusa and edited by Jen Chien. Close All tabs producer is Maya Cueva. Chris Egusa is our senior editor. Additional editing by Chris Hambrick and Jen Chien, who’s KQED’s director of podcasts. Original music, including our theme song and credits by Chris Egusa. Additional music by APM. Brendan Willard is our audio engineer. \u003c/span>\u003c/p>\n\u003cp>\u003cspan style=\"font-weight: 400\">Audience engagement support from Maha Sanad. Katie Sprenger is our podcast operations manager and Ethan Toven-Lindsey is our editor in chief. Some members of the KQED podcast team are represented by the Screen Actors Guild, American Federation of Television and Radio Artists, San Francisco, Northern California Local. \u003c/span>\u003c/p>\n\u003cp>\u003cspan style=\"font-weight: 400\">This episode’s keyboard sounds were submitted by my dad, Casey Sung, and recorded on his white and blue Epomaker Aula F99 keyboard with Greywood V3 switches and Cherry Profile PBT keycaps. \u003c/span>\u003c/p>\n\u003cp>\u003cspan style=\"font-weight: 400\">Okay, and I know it’s a podcast cliche, but if you like these deep dives and want us to keep making more, it would really help us out if you could rate and review us on Spotify, Apple Podcasts, or wherever you listen to the show. Follow us on Instagram at CloseAllTabsPod, or TikTok at Close All Tabs. Thanks for listening. 
\u003c/span>\u003c/p>\n\u003cp> \u003c/p>\n\u003cp>[ad floatright]\u003c/p>\n",
"blocks": [],
"excerpt": "Researchers Emily M. Bender and Alex Hanna critique the AI industry’s loftiest claims and point out real-world harms of AI.",
"status": "publish",
"parent": 0,
"modified": 1760644665,
"stats": {
"hasAudio": true,
"hasVideo": false,
"hasChartOrMap": false,
"iframeSrcs": [],
"hasGoogleForm": false,
"hasGallery": false,
"hasHearkenModule": false,
"hasPolis": false,
"paragraphCount": 79,
"wordCount": 6701
},
"headData": {
"title": "Beyond the AI Hype Machine | KQED",
"description": "When ChatGPT launched in 2022, it kicked off what some have called the “AI hype machine” — a frenzy of promotion and investment that has sent some tech companies’ valuations soaring to record heights. Meanwhile, computational linguist Emily M. Bender and AI researcher and sociologist Alex Hanna have proudly worn the titles of “AI hype busters,” critiquing the industry’s loftiest claims and pointing out the real-world harms behind this wave of excitement. What began as a satirical podcast is now a book, The AI Con: How to Fight Big Tech’s Hype and Create the Future We Want. In this episode, Alex and Emily explain why the very term “AI” is misleading, how AI boosters and doomers are really flip sides of the same coin, and why we should question the AI inevitability narrative.",
"ogTitle": "",
"ogDescription": "",
"ogImgId": "",
"twTitle": "",
"twDescription": "",
"twImgId": "",
"socialDescription": "When ChatGPT launched in 2022, it kicked off what some have called the “AI hype machine” — a frenzy of promotion and investment that has sent some tech companies’ valuations soaring to record heights. Meanwhile, computational linguist Emily M. Bender and AI researcher and sociologist Alex Hanna have proudly worn the titles of “AI hype busters,” critiquing the industry’s loftiest claims and pointing out the real-world harms behind this wave of excitement. What began as a satirical podcast is now a book, The AI Con: How to Fight Big Tech’s Hype and Create the Future We Want. In this episode, Alex and Emily explain why the very term “AI” is misleading, how AI boosters and doomers are really flip sides of the same coin, and why we should question the AI inevitability narrative.",
"schema": {
"@context": "https://schema.org",
"@type": "NewsArticle",
"headline": "Beyond the AI Hype Machine",
"datePublished": "2025-10-15T03:00:53-07:00",
"dateModified": "2025-10-16T12:57:45-07:00",
"image": "https://cdn.kqed.org/wp-content/uploads/2020/02/KQED-OG-Image@1x.png",
"isAccessibleForFree": "True",
"publisher": {
"@type": "NewsMediaOrganization",
"@id": "https://www.kqed.org/#organization",
"name": "KQED",
"logo": "https://cdn.kqed.org/wp-content/uploads/2020/02/KQED-OG-Image@1x.png",
"url": "https://www.kqed.org",
"sameAs": [
"https://www.facebook.com/KQED",
"https://twitter.com/KQED",
"https://www.instagram.com/kqed/",
"https://www.tiktok.com/@kqedofficial",
"https://www.linkedin.com/company/kqed",
"https://www.youtube.com/channel/UCeC0IOo7i1P_61zVUWbJ4nw"
]
}
}
},
"primaryCategory": {
"termId": 33520,
"slug": "podcast",
"name": "Podcast"
},
"source": "Close All Tabs",
"sourceUrl": "https://www.kqed.org/podcasts/closealltabs",
"audioUrl": "https://chrt.fm/track/G6C7C3/traffic.megaphone.fm/KQINC5696998106.mp3?updated=1760509908",
"sticky": false,
"nprStoryId": "kqed-12059911",
"templateType": "standard",
"featuredImageType": "standard",
"excludeFromSiteSearch": "Include",
"articleAge": "0",
"path": "/news/12059911/beyond-the-ai-hype-machine",
"audioTrackLength": null,
"parsedContent": [
{
"type": "contentString",
"content": "\u003cdiv class=\"post-body\">\u003cp>\u003cp>\u003ca href=\"#episode-transcript\">\u003ci>View the full episode transcript.\u003c/i>\u003c/a>\u003c/p>\n\u003cp>\u003cspan style=\"font-weight: 400\">When ChatGPT launched in 2022, it kicked off what some have called the “AI hype machine” — a frenzy of promotion and investment that has sent some tech companies’ valuations soaring to record heights. Meanwhile, computational linguist Emily M. Bender and AI researcher and sociologist Alex Hanna have proudly worn the titles of “AI hype busters,” critiquing the industry’s loftiest claims and pointing out the real-world harms behind this wave of excitement. What began as a satirical podcast is now a book, \u003c/span>\u003ca href=\"https://thecon.ai/\">\u003ci>\u003cspan style=\"font-weight: 400\">The AI Con\u003c/span>\u003c/i>\u003c/a>\u003cspan style=\"font-weight: 400\">:\u003c/span>\u003ci>\u003cspan style=\"font-weight: 400\"> How to Fight Big Tech’s Hype and Create the Future We Want\u003c/span>\u003c/i>\u003cspan style=\"font-weight: 400\">. In this episode, Alex and Emily explain why the very term “AI” is misleading, how AI boosters and doomers are really flip sides of the same coin, and why we should question the AI inevitability narrative. 
\u003c/span>\u003c/p>\n\u003cp>\u003c!-- iframe plugin v.4.3 wordpress.org/plugins/iframe/ -->\u003cbr>\n\u003ciframe loading=\"lazy\" frameborder=\"0\" height=\"200\" scrolling=\"no\" src=\"https://playlist.megaphone.fm/?e=KQINC5696998106\" width=\"100%\" class=\"iframe-class\">\u003c/iframe>\u003c/p>\n\u003cp>\u003cspan style=\"font-weight: 400\">Guests: \u003c/span>\u003c/p>\n\u003cul>\n\u003cli>\u003ca href=\"https://bsky.app/profile/emilymbender.bsky.social\">\u003cspan style=\"font-weight: 400\">Emily Bender\u003c/span>\u003c/a>\u003cspan style=\"font-weight: 400\">, professor of linguistics at the University of Washington\u003c/span>\u003c/li>\n\u003cli>\u003ca href=\"https://bsky.app/profile/alexhanna.bsky.social\">Alex Hanna\u003c/a>, director of research at the Distributed AI Research Institute\u003c/li>\n\u003c/ul>\n\u003cp>\u003cspan style=\"font-weight: 400\">Further reading/listening: \u003c/span>\u003c/p>\n\u003cul>\n\u003cli>\u003ca href=\"https://thecon.ai/\">\u003cspan style=\"font-weight: 400\">The AI Con: How to Fight Big Tech’s Hype and Create the Future We Want\u003c/span>\u003c/a>\u003cspan style=\"font-weight: 400\"> — Emily Bender and Alex Hanna\u003c/span>\u003c/li>\n\u003cli>\u003ca href=\"https://www.dair-institute.org/maiht3k/\">\u003cspan style=\"font-weight: 400\">The Mystery AI Hype Theater 3000 Podcast\u003c/span>\u003c/a>\u003cspan style=\"font-weight: 400\"> — Emily M. 
Bender and Alex Hanna\u003c/span>\u003c/li>\n\u003cli>\u003ca href=\"https://www.techpolicy.press/ai-hurts-consumers-and-workers-and-isnt-intelligent/\">\u003cspan style=\"font-weight: 400\">“AI” Hurts Consumers and Workers — and Isn’t Intelligent\u003c/span>\u003c/a>\u003cspan style=\"font-weight: 400\"> — Emily Bender and Alex Hanna, \u003c/span>\u003ci>\u003cspan style=\"font-weight: 400\">Tech Policy Press\u003c/span>\u003c/i>\u003c/li>\n\u003cli>\u003ci>\u003c/i>\u003ca href=\"https://lithub.com/on-the-very-real-dangers-of-the-artificial-intelligence-hype-machine/\">\u003cspan style=\"font-weight: 400\">On the Very Real Dangers of the Artificial Intelligence Hype Machine: Emily M. Bender and Alex Hanna Explore AI History, the Cold War, and a Fatally Overhyped Idea \u003c/span>\u003c/a>\u003cspan style=\"font-weight: 400\">— Emily M. Bender, \u003c/span>\u003ci>\u003cspan style=\"font-weight: 400\">LitHub\u003c/span>\u003c/i>\u003c/li>\n\u003cli>\u003ci>\u003c/i>\u003ca href=\"https://www.404media.co/sora-2-content-violation-guardrails-error/\">\u003cspan style=\"font-weight: 400\">People Are Crashing Out Over Sora 2’s New Guardrails\u003c/span>\u003c/a>\u003cspan style=\"font-weight: 400\"> — Samantha Cole, \u003c/span>\u003ci>\u003cspan style=\"font-weight: 400\">404 Media\u003c/span>\u003c/i>\u003c/li>\n\u003cli>\u003ci>\u003c/i>\u003ca href=\"https://futurism.com/artificial-intelligence/sora-2-financial-problem\">\u003cspan style=\"font-weight: 400\">Sora 2 Has a Huge Financial Problem\u003c/span>\u003c/a>\u003cspan style=\"font-weight: 400\"> — Victor Tangermann, \u003c/span>\u003ci>\u003cspan style=\"font-weight: 400\">Futurism\u003c/span>\u003c/i>\u003c/li>\n\u003cli>\u003ci>\u003c/i>\u003ca href=\"https://www.technologyreview.com/2025/05/20/1116327/ai-energy-usage-climate-footprint-big-tech/\">\u003cspan style=\"font-weight: 400\">We did the math on AI’s energy footprint. 
Here’s the story you haven’t heard.\u003c/span>\u003c/a>\u003cspan style=\"font-weight: 400\"> — James O’Donnell and Casey Crownhart, \u003c/span>\u003ci>\u003cspan style=\"font-weight: 400\">MIT Technology Review\u003c/span>\u003c/i>\u003c/li>\n\u003c/ul>\n\u003cp>Want to give us feedback on the show? Shoot us an email at \u003ca href=\"mailto:CloseAllTabs@KQED.org\">CloseAllTabs@KQED.org\u003c/a>\u003c/p>\n\u003cp>\u003ca href=\"https://www.instagram.com/closealltabspod/\">Follow us on Instagram\u003c/a>\u003c/p>\n\u003cp>\u003c/p>\u003c/div>",
"attributes": {
"named": {},
"numeric": []
}
},
{
"type": "component",
"content": "",
"name": "ad",
"attributes": {
"named": {
"label": "fullwidth"
},
"numeric": [
"fullwidth"
]
}
},
{
"type": "contentString",
"content": "\u003cdiv class=\"post-body\">\u003cp>\u003c/p>\n\u003ch2>\u003c/h2>\n\u003ch2 id=\"episode-transcript\">Episode Transcript\u003c/h2>\n\u003cp>\u003ci>\u003cspan style=\"font-weight: 400\">This is a computer-generated transcript. While our team has reviewed it, there may be errors.\u003c/span>\u003c/i>\u003c/p>\n\u003cp>\u003cb>Morgan Sung: \u003c/b>\u003cspan style=\"font-weight: 400\">A few weeks ago, OpenAI launched an app, Sora. It’s a vertical video social platform, similar to TikTok, except all the videos are generated by the company’s AI image generator, Sora 2. Within days, the app was a copyright infringement nightmare. There were videos of SpongeBob cooking meth, unsanctioned Rick and Morty ads for crypto startups, and many, many videos of open AI CEO Sam Altman doing depraved things to copyrighted characters. Like the one where he brutally barbecues and carves up Pikachu. \u003c/span>\u003c/p>\n\u003cp>\u003ci>\u003cspan style=\"font-weight: 400\">[AI CEO Sam Altman] Pikachu on the grill here. It’s already got a beautiful char and it smells like somebody plugged in a chicken. Let’s give it a flip. I’m gonna carve it into some thick steaks. Look at that. Crust on the outside, pink and juicy in the middle. Cheers.\u003c/span>\u003c/i>\u003cspan style=\"font-weight: 400\"> \u003c/span>\u003c/p>\n\u003cp>\u003cb>Morgan Sung: \u003c/b>\u003cspan style=\"font-weight: 400\">All of these 10 second videos require an immense amount of computing power, which is extremely costly to maintain. In a blog post, Sam Altman admitted that the company still needs to figure out how to make money off of Sora. He wrote, “People are generating much more than we expected per user, and a lot of videos are being generated for very small audiences.” Facing heat from copyright holders like Disney and Nintendo, Altman also announced extra guardrails for the app to curb infringement. 
Now, users are complaining that everything they try to generate using Sora 2 gets flagged as a violation of the copyrighted content policy. They’re already getting bored of the app. This whole cycle has been described as the AI hype machine. Big investments are made based on big promises of innovation, disruption, revolution. This hype fuels more investment, which, in turn, fuels the hype. The cycle continues when a new product launches. Meta, for example, launched its own AI social video app, called Vibes, last month too, which was quickly forgotten about when Sora launched. \u003c/span>\u003c/p>\n\u003cp>\u003cb>Alex Hanna: \u003c/b>\u003cspan style=\"font-weight: 400\">AI hype is effectively premised on fear of missing out. It is the fear that if you don’t get onto this new technology, you are going to be left behind. \u003c/span>\u003c/p>\n\u003cp>\u003cb>Morgan Sung: \u003c/b>\u003cspan style=\"font-weight: 400\">That’s Alex Hanna, a sociologist and the Director of Research at the Distributed AI Research Institute. \u003c/span>\u003c/p>\n\u003cp>\u003cb>Alex Hanna: \u003c/b>\u003cspan style=\"font-weight: 400\">If you’re a corporate manager, you’re going to have your competitors just leave you in the dust. If you are a teacher, you are doing a disservice to your students by not preparing them for the job market of the future. If you were a student, you were going to miss out on all the skills and all your classmates are going to be outperforming you. And as a worker, you will be doing things the old way, the analog way, and everyone is going to be outpacing you. \u003c/span>\u003c/p>\n\u003cp>\u003cb>Morgan Sung: \u003c/b>\u003cspan style=\"font-weight: 400\">Alex and her co-author, Emily M. Bender, recently published a book, The AI Con, How to Fight Big Tech’s Hype and Create the Future We Want. Emily runs the computational linguistics program at the University of Washington. This is a field of study that combines human language with machine learning. 
\u003c/span>\u003c/p>\n\u003cp>\u003cb>Emily M. Bender: \u003c/b>\u003cspan style=\"font-weight: 400\">I often get asked the question, well aren’t you worried that students are going to get left behind? Etc. And my answer to that is often, where is everybody going? Like, this metaphor of left behind suggests that people are running off into some brilliant future. I just don’t see it, you know, setting aside the fact that the technology doesn’t do what it’s being sold to do, but that is overhyped and over promised. The idea that we’d be better off with instead of interacting with people at all stages, interacting with screens that that’s just not the future that I want. \u003c/span>\u003c/p>\n\u003cp>\u003cb>Morgan Sung: \u003c/b>\u003cspan style=\"font-weight: 400\">In this episode, we’re talking about the AI hype machine, when it started, how it’s fed, and why a growing corner of critics say they see right through it. \u003c/span>\u003c/p>\n\u003cp>\u003cspan style=\"font-weight: 400\">This is Close All Tabs. I’m Morgan Sung, tech journalist and your chronically online friend, here to open as many browser tabs as it takes to help you understand how the digital world affects our real lives. Let’s get into it. \u003c/span>\u003c/p>\n\u003cp>\u003cspan style=\"font-weight: 400\">All right, like we always do, we’re starting by opening a new tab. What is P-Doom? In their book, The AI Con, Alex and Emily talk about these two groups. There are the AI boosters, the people who are optimistic that AI will pave the way to our utopian future. Then there are the AI doomers: the people that catastrophize, and believe that AI progress will usher in an era of societal collapse and human extinction. It’s very Matrix. \u003c/span>\u003c/p>\n\u003cp>\u003ci>\u003cspan style=\"font-weight: 400\">[Clip from the film “The Matrix] The Matrix is a system, Neo. That system is our enemy. 
\u003c/span>\u003c/i>\u003c/p>\n\u003cp>\u003cb>Morgan Sung \u003c/b>\u003cspan style=\"font-weight: 400\">But before we break this down further, let’s start by defining our terms. Here’s Emily. \u003c/span>\u003c/p>\n\u003cp>\u003cb>Emily M. Bender: \u003c/b>\u003cspan style=\"font-weight: 400\">Artificial intelligence does not refer to a coherent set of technologies, and it has throughout its history, since it was coined by John McCarthy in 1955, basically been used to sell this idea of some magic do-everything technology in order to get money. Initially, it was research funding and then DOD money and now a lot of it is venture capital money. \u003c/span>\u003c/p>\n\u003cp>\u003cb>Alex Hanna: \u003c/b>\u003cspan style=\"font-weight: 400\">Yeah, and the way that this has proliferated in the modern day is that so many things get called AI. So that could be automated decision-making systems used for determining whether someone gets social services. And so that gets looped in, and then we also get recommendation systems, things like the TikTok algorithm, the Instagram Reels algorithm, pick your short-form video. But then, it’s really manifest in these large language models and diffusion models that are looped into the category of generative AI.\u003c/span>\u003c/p>\n\u003cp>\u003cb>Morgan Sung: \u003c/b>\u003cspan style=\"font-weight: 400\">You start this book from this one moment in 2023, when Chuck Schumer at the time, the Senate majority leader, held a series of forums around AI. Can you take us back to that moment and like set the scene for us? \u003c/span>\u003c/p>\n\u003cp>\u003cb>Alex Hanna: \u003c/b>\u003cspan style=\"font-weight: 400\">So, late 2023, Chuck Schumer is convening the eighth of total nine Senate Insight forums around AI, and he asks folks, this is very weird, he asks, “what is folks’ probability of doom?” And this is abbreviated as P(doom), and for this instance, it’s an audio platform, that is P, open parentheses, doom, closed parentheses. 
And he also asked, “what people’s pee hope is.” So this means what is your probability that there’s going to be some kind of a doom scenario, in which through, you know, hook or crook, some kind of thing called AI is going to outperform or outsmart humans and take over and lead to human extinction. And in the book, we start and we say, well, this is the wrong question. But also if you’re looking at harms that are happening in the here and now, there are many that exist, whether that be deep fake porn being made out of non-consensual adults and children, the use of automated decision-making and weapons targeting, especially in Gaza, and then we also talk about students having their exams effectively being judged by these automated tools. So talking about P(doom) in this register is asking the wrong question and focusing on the wrong things. \u003c/span>\u003c/p>\n\u003cp>\u003cb>Emily M. Bender: \u003c/b>\u003cspan style=\"font-weight: 400\">But oftentimes it looks like the doomers, the people with a high P(doom) value, the people who take that question seriously in the first place, um, and the boosters, the people who say this is gonna solve all our problems, are like the opposite ends of a spectrum. And that is how these people present themselves, it is how the media often presents what’s going on, and it is very misleading. I think that one of the points that we make is that doomerism is another kind of AI hype, because it’s saying, our system is very powerful. It’s so powerful, it’s going to kill us all, is a way of saying it’s very powerful, but also we make the point that the doomers and the boosters are two sides of the same coin. 
And it, I think, becomes very clear if you look at it this way, which is to say, the doomers say, “AI is a thing, it’s imminent, it’s inevitable, and it’s gonna kill us all.” And the boosters say, “AI’s a thing, it’s imminent, it’s inevitable, and it is gonna solve all of our problems.” And it’s pretty easy to see these are the same position with just a different twist at the end. \u003c/span>\u003c/p>\n\u003cp>\u003cb>Alex Hanna: \u003c/b>\u003cspan style=\"font-weight: 400\">And the funny thing about this boosterism, doomerism dichotomy is that these are many of the same people or they run in many of same circles. So, you know, there was this document that was put out called AI 2027, in which it ends with humanity dying and the kind of choose your own adventure. There’s only two endings here. The choose your own adventure and one of them, you know everyone dies. But the lead author of this works at OpenAI. And there’s many such cases of people who are working on quote unquote, “AI alignment”, who are in these industries. So, it’s again not as if they’re against the building of AI, or we should just say no, it’s actually a very narrow segment of people. \u003c/span>\u003c/p>\n\u003cp>\u003cb>Morgan Sung: \u003c/b>\u003cspan style=\"font-weight: 400\">You described this industry as the AI hype machine, the modern AI hype machine, what does it look like? I mean, who are the players? \u003c/span>\u003c/p>\n\u003cp>\u003cb>Alex Hanna: \u003c/b>\u003cspan style=\"font-weight: 400\">Yeah, I mean, the players are many of the big tech players that we know. So Microsoft, Google, Amazon, Meta, but with some new entrants, OpenAI being the most significant one. Um, and along with OpenAI, a few offshoots, so Anthropic is kind of the most notable one. And then the company that’s creating the shovels for the gold rush, so that’s your Nvidia, and then your Taiwanese semiconductor manufacturing company, abbreviated as TSMC. \u003c/span>\u003c/p>\n\u003cp>\u003cb>Emily M. 
Bender: \u003c/b>\u003cspan style=\"font-weight: 400\">I want to say that we see AI hype not just originating from those big players, like that is a large source of it. Also we hear over and over and again about people working in various businesses being told by their higher ups that they have to try this new AI thing. And so there’s this sort of secondary promulgation of hype that comes from middle management and up that have been sold on the idea that this is going to, you know, really increase productivity. And, you know, on the one hand, it’s a very useful excuse for doing layoffs that they may have otherwise already wanted to do, but then on the other hand, some people seem to have really bought into the idea. So they tell the people working for them, you have to spend time figuring out how to make yourself more productive by using these so-called AI tools, because everyone’s telling me that that’s the way of the future. \u003c/span>\u003c/p>\n\u003cp>\u003cb>Morgan Sung: \u003c/b>\u003cspan style=\"font-weight: 400\">I mean, the obvious way people are, these players, are feeding into the AI hype machine is by extolling the virtues of AI, or, you know, kind of spreading this very doomerous sci-fi rhetoric. But what other strategies are being used to feed this machine? \u003c/span>\u003c/p>\n\u003cp>\u003cb>Emily M. Bender: \u003c/b>\u003cspan style=\"font-weight: 400\">So one important strategy is what I sometimes call citations to the future. So people will say, yeah, yeah. It’s got problems now, but it’s going to do all of these things. And I think it really is the only technology that we are expected to evaluate based on promises of what it will be doing, right? That car that I just bought only gets, you know, 35 miles to the gallon. But that’s OK, because the later one’s going to get 50. 
We don’t talk about it that way, except with the so-called AI technology.\u003c/span>\u003c/p>\n\u003cp>So, citations to the future is one big strategy and another one is anthropomorphizing language, talking about things that have happened as if the computer systems themselves did it of their own volition and autonomously instead of people having used the system to do it or done something in order to build the system. So it’ll be something like, AI needs lots and lots of data. Well, no, people who want to build the system that they’re calling AI are amassing lots and lots of data in order to build them, or AI is thirsty, it needs lots of water, or AI was able to identify, you know, something in a blurry image. It’s like — in no sense, right? \u003c/p>\n\u003cp>\u003cspan style=\"font-weight: 400\">People used XYZ tool in order do a thing, or in order to build these tools, they are using lots of of water and so on. So this anthropomorphizing language sort of shifts the people out of the frame and hides a bunch of accountability, and at the same time, makes the systems sound cooler than they are. \u003c/span>\u003c/p>\n\u003cp>\u003cb>Morgan Sung: \u003c/b>\u003cspan style=\"font-weight: 400\">Alex and Emily also pointed out that players in the AI industry push this adoption of AI into our everyday lives by really trying to humanize the product. We’re gonna dive into that in a new tab. First, a quick break. \u003c/span>\u003c/p>\n\u003cp>\u003cspan style=\"font-weight: 400\">Time for a new tab! Are we really just meat machines?\u003cbr>\n\u003c/span>\u003c/p>\n\u003cp>\u003cspan style=\"font-weight: 400\">Let’s talk about the technology itself, like the way people talk about large language models as AI, um, ChatGPT, Claude, Grok. Many people understand that these models are basically predicting the words that most often go together. But can you break it down further? Like, what’s really going on under the hood there? \u003c/span>\u003c/p>\n\u003cp>\u003cb>Emily M. 
Bender: \u003c/b>\u003cspan style=\"font-weight: 400\">So, the first very important lesson is that when we say word, we’re actually talking about two things. We’re talking about the way the word is spelled and pronounced and what it is used to mean. And one thing that makes that hard to keep in mind is that as proficient speakers of the languages we speak, pretty much anytime we encounter the spelling or sound of a word, we are also encountering what the person using it is using it to talk about. And so we always experience the form and meaning together. But a language model, so that the core component of something like Gemini or Grok or Claude or ChatGPT is literally a system for modeling which bits of words go with which other bits of words in whatever the input collection of text was to create that model. And so what we have are models that are very good at putting literally like spellings of parts of words next to each other in a way that looks like something somebody might say. \u003c/span>\u003c/p>\n\u003cp>\u003cb>Morgan Sung: \u003c/b>\u003cspan style=\"font-weight: 400\">Emily and Alex have come up with a few phrases that illustrate what large language models really are, which also describe the limitations of this tech. We’ve got synthetic text extruding machine. \u003c/span>\u003c/p>\n\u003cp>\u003cb>Emily M. Bender: \u003c/b>\u003cspan style=\"font-weight: 400\">The choice of the word extrude is very intentional because it’s a little gross. \u003c/span>\u003c/p>\n\u003cp>\u003cb>Morgan Sung: \u003c/b>\u003cspan style=\"font-weight: 400\">Racist pile of linear algebra. Spicy autocomplete. And one phrase that really took off, stochastic parrot. Emily coined the phrase in a research paper she co-authored in 2020. Parrots can mimic human speech, but whether they can really comprehend it, that’s dubious. Stochastic comes from probability theory. It means randomly determined. 
So a stochastic parrot essentially mimics language in a random order and does so convincingly, but it doesn’t understand it. \u003c/span>\u003c/p>\n\u003cp>\u003cb>Emily M. Bender: \u003c/b>\u003cspan style=\"font-weight: 400\">Starting with OpenAI’s GPT-2 and GPT3, they were using it to create synthetic text. And so one of the things we worried about in that paper is what happens if someone comes across synthetic text and doesn’t know that it was synthetic? What we didn’t realize at the time is that people would be happy to look at synthetic text while knowing that it’s synthetic. That is very surprising to me. And so the phrase stochastic parrots was this attempt to make vivid what’s going on, to help people understand why the output of a language model run to repeatedly answer the question, what’s a likely next word, is not the same thing as text produced by a person or group of people with something to communicate. And what’s happened, it’s been fascinating as a linguist to watch that phrase go out into the world, so for the first little while, it was people referring to the paper, and then it sort of became people talking about, um, that claim that large language models are not understanding, they’re just repeatedly predicting a likely next word. And then it got picked up or interpreted as an insult, which is surprising to me because in order for it to be an insult, the thing that it’s being applied to would have to the kind of thing that could be insulted. \u003c/span>\u003c/p>\n\u003cp>\u003cb>Morgan Sung: \u003c/b>\u003cspan style=\"font-weight: 400\">Then in 2022, Sam Altman tweeted, I am a stochastic parrot and so are you. \u003c/span>\u003c/p>\n\u003cp>\u003cb>Emily M. 
Bender: \u003c/b>\u003cspan style=\"font-weight: 400\">I think what happens when Sam Altman picks it up and tweets that is that it is, on the one hand, sort of an attempt to reclaim what is understood as an insult or slur by people in that mindset, but also, and very importantly, it is about minimizing what it is to be human, so that he can claim that the system that he’s built is as good as a person.\u003c/span>\u003c/p>\n\u003cp>\u003cb>Morgan Sung: \u003c/b>\u003cspan style=\"font-weight: 400\">Emily and Alex say this concept of comparing humans to, essentially, flesh machines is a classic move in the AI hype machine playbook. It’s reducing humanity and what it means to be human to programming, like Eliza in the 60s. Eliza was an early natural language processing program designed to mimic a therapist. Think of it as a great, great, great, grand chatbot of ChatGPT. A lot of people, from academics to government leaders to tech industry giants, bought into the Eliza hype. And that freaked out Eliza’s own creator, Joseph Weizenbaum. In a book he published in the 70s, Weizenbaum warned that machines would never be able to make the same decisions that humans make because they don’t have human empathy. His criticism of AI caused a stir in the research community. And decades later, AI boosters are still making that same claim. That humans and machines aren’t that different. But what does this devaluing of humanity really mean for us? \u003c/span>\u003c/p>\n\u003cp>\u003cb>Alex Hanna: \u003c/b>\u003cspan style=\"font-weight: 400\">Yeah, I mean, it means a lot of things. It really seems to emphasize that there is, kind of, aspects of human behavior that can just be reduced to our observable outputs, right? Humans are just things that output language or output actions, when that’s not true. Humans have a much more vivid internal life. Um, we think about others. 
Uh, we think about, kind of, co-presence, but it’s more about saying how we’re comparing ourselves to machines that are programmed by people and those people in those institutions have particular types of incentives to make machines that behave as such. So that’s the kind of implications that it has and it also has the implications of other kinds of moves into humanism, dehumanization and what that does and how we treat people and with regards to dignity and propriety of rights. \u003c/span>\u003c/p>\n\u003cp>\u003cb>Morgan Sung: \u003c/b>\u003cspan style=\"font-weight: 400\">Can you also give concrete examples of where we see this kind of, uh, devaluing of humans? \u003c/span>\u003c/p>\n\u003cp>\u003cb>Emily M. Bender: \u003c/b>\u003cspan style=\"font-weight: 400\">So I think if we say that humans can be reduced to their outputs, that that leads to lots of problems. And one is we end up saying, you know, the form of, or the words that teachers and students say in the classroom is the learning situation. And so we can replace the teacher with a system for outputting words and then those students will get as much and maybe it’ll be personalized and it’ll better. And that is dehumanizing to teachers clearly and also to students because it removes, you know, everything that is about the student and teacher’s internal life and about their relationship and about their community from the situation. But I think it’s also really important in terms of the workforce more generally, that basically if we say, well, humans like large language models are systems for outputting words, then it’s a very small step to basically saying the whole value of this person is how many words they can output and doing a very, very dehumanizing work environment to people. 
\u003c/span>\u003c/p>\n\u003cp>\u003cb>Alex Hanna: \u003c/b>\u003cspan style=\"font-weight: 400\">We also see this in other domains like the Amazon work floor and the ways that these mini robots flit from place to place and the so-called quote unquote pickers. People on Amazon work warehouses have to pick things and then deliver them. So there’s a lot of implications for that and I think also in seeing the humanity in other folks and how we treat other folks. You know, if they’re merely meat machines, then what does it say about how we view them with respect to, kind of, personal rights and human rights and what kind of rights they should be afforded? \u003c/span>\u003c/p>\n\u003cp>\u003cb>Morgan Sung: \u003c/b>\u003cspan style=\"font-weight: 400\">This idea of human beings just being walking meat machines is chilling. It definitely creeped me out. What are the other real world consequences of this thinking? Let’s open a new tab. Who’s really harmed by AI hype? Alex and Emily have said that their goal with writing the AI con is to reduce the harm caused by AI hype. Automation, for example, doesn’t just replace jobs. Healthcare providers are increasingly relying on AI products for medical triage to decide which patients to see first. Free legal representation, a guaranteed right in criminal cases, can be replaced by a lawyer using a chat bot. All of this potentially lowers the quality of these services. And introduces bias into these systems. Artists and other creatives, meanwhile, are struggling to make ends meet as AI generators, sometimes trained on their own work, are used as a cheaper, faster alternative. And then there’s how large language models are disrupting our whole information ecosystem. 
\u003c/span>\u003c/p>\n\u003cp>\u003cb>Alex Hanna: \u003c/b>\u003cspan style=\"font-weight: 400\">There’s a metaphor we use in the book, the idea that information is being output from these models and results in information ecosystem spills, like toxic spills that really can’t be cleaned up. There’s not really a reliable way to detect synthetic text. And so you’re having to deal with and navigate and try to understand whether something on the internet is actually reflective of truth claims that are being made and perhaps researched more deeply by human individuals. \u003c/span>\u003c/p>\n\u003cp>\u003cb>Morgan Sung: \u003c/b>\u003cspan style=\"font-weight: 400\">You’ve written that the strongest critiques against AI boosterism come from black, brown, poor, queer, and disabled scholars and activists. Can you talk about some examples of these critiques and why these groups specifically are so uniquely positioned to make them? \u003c/span>\u003c/p>\n\u003cp>\u003cb>Alex Hanna: \u003c/b>\u003cspan style=\"font-weight: 400\">So we wrote about that in the register of thinking about the ways in which systems, in here, I want to say data-driven systems, not just large language models, but even different systems just don’t work for black, brown communities, queer, and trans people, and then people like refugees and people on the move. The kind of pioneering work of Drs. Timnit Gebru and Joy Buolamwini in their paper Gender Shades talks about facial analysis systems, specifically the way that facial analysis systems do very poorly on darker-skinned women and that there’s a huge delta between darker-skinned women and lighter-skinned men. Sasha Costanza-Chock talks about how tools like TSA scanners do very poorly on trans people. Typically flagging genitals as anomalies or chest areas as anomalies, and then the kind of disparities of how systems talk about women. 
So there’s been a few papers talking about the ways in which different tools, in this case a word embedding space, makes associations between people and occupation. So, man is to doctor, women is to… typically, the completion is nurse, so it makes presuppositions of this. \u003c/span>\u003c/p>\n\u003cp>\u003cb>Alex Hanna: \u003c/b>\u003cspan style=\"font-weight: 400\">All of this stuff effectively happens in large-language models [laughter] and happens in image generation models as well. There’s some great research by the Bloomberg data team that shows that if you input something like a nurse, uh, typically or a housekeeper, it outputs a kind of a phenotypically looking darker-skinned woman. If you type in CEO, white man. And so those kinds of elements are the bias element of it. \u003c/span>\u003c/p>\n\u003cp>\u003cb>Emily M. Bender: \u003c/b>\u003cspan style=\"font-weight: 400\">Ruha Benjamin sums it up really nicely in this beautiful essay called The New Artificial Intelligentsia that appeared in the LA Review of Books in 2024. And she’s talking about these ideas of transhumanism and merging with the machines. She says this zealous desire to transcend humanity ignores the fact that we have not all had the chance to be fully human. My interpretation of what she’s saying is that the people that society does not accord full humanity to have a very different experience of technology, both in the ways, as Alex is saying, it’s being used on them, in the ways that doesn’t work well for them and just in the way that it intrudes on their life. And so people who have the privilege of not experiencing any of that tend to be less sensitized to what’s going on and to have a less informed perspective. \u003c/span>\u003c/p>\n\u003cp>\u003cb>Morgan Sung: \u003c/b>\u003cspan style=\"font-weight: 400\">And this less-informed perspective encourages AI boosters, who continue to fuel the hype machine. 
This means investing in and launching new products at a breakneck pace, often overlooking the real-world impact. The MIT Technology Review recently reported that generating one 5-second AI video uses about 3.4 million joules, the equivalent of running a microwave for over an hour. At scale this amount of energy consumption is devastating for the environment. And running all of this comes at a steep price for AI companies, too. \u003c/span>\u003c/p>\n\u003cp>\u003cspan style=\"font-weight: 400\">Like we talked about earlier, OpenAI’s Sora app is proving to be wildly expensive, with more users generating videos than actually watching them. And after the copyright fiasco and subsequent new guardrails, it seems like some initial adopters are already moving on. Can the hype machine sustain this kind of frenzied investment with such limited return? \u003c/span>\u003c/p>\n\u003cp>\u003cspan style=\"font-weight: 400\">Okay, we’re opening one last tab. Is the hype machine breaking? \u003c/span>\u003c/p>\n\u003cp>\u003cspan style=\"font-weight: 400\">Do you think the AI hype bubble is going to burst? I mean, like, are there economic critiques? You’ve heard the social ones, but is there anything pointing to the AI hype bubble possibly at least deflating? \u003c/span>\u003c/p>\n\u003cp>\u003cb>Alex Hanna: \u003c/b>\u003cspan style=\"font-weight: 400\">Yeah, well, the problem is that there’s so much capital expenditure going into building things like data centers, and they’re going into these massive data center build out where, you know, the kind of projections and how much OpenAI, Microsoft, Google, Amazon, and Meta are spending on this all is astronomical. I mean, hundreds of billions of dollars, just some of the largest technological infrastructure projects that we’ve ever seen. 
\u003c/span>\u003c/p>\n\u003cp>\u003cspan style=\"font-weight: 400\">At the same time, OpenAI again, the company that has the most queries to Chat GPT, people using most of its products, is making revenue on the order of maybe $10 billion a year. So it’s just orders of magnitude less. And the kind of metaphor that’s being used as well, we have to build the railroads first, and then once the railroads get going, we can put rail cars in them. But that metaphor doesn’t work at all. People are already using the product. And, you know, companies are already saying, we’re not getting a lot of value out of this. You know, there was an, something that was coming out of MIT, which said 95% of companies just haven’t really gained value from quote unquote AI. So what’s happening? This is very bubble shaped, you know, and I don’t know how the story ends, but it’s very alarming that these four to seven companies are propping up the US and world economy right now, so what happens when the bubble deflates or bursts, it’s not going to be good. \u003c/span>\u003c/p>\n\u003cp>\u003cb>Morgan Sung: \u003c/b>\u003cspan style=\"font-weight: 400\">Like you said, um, you finished this book in September 2024. The AI industry has only grown since then. What have you learned about the state of the AI hype machine from the reception to your book? \u003c/span>\u003c/p>\n\u003cp>\u003cb>Emily M. Bender: \u003c/b>\u003cspan style=\"font-weight: 400\">I would say what I’ve learned the most about is about the resilience of people and the importance of connection and community. So the antidote to the hype is a variety of things, one is ridicule as praxis, as we say in the book, and also solidarity and labor movements, but also just sort of connection. And one form of that connection is that there’s a lot of people who are, who feel isolated in a workplace or a social circle where everyone around them seems just completely gaga for this technology and they’re the odd one out. 
And so one of the joys of both our podcasts and this book has been to find those people and be found by those people who say, oh, so glad I’m not the only one. And then they can form community with other people who have the same reaction and I think that that is super important. \u003c/span>\u003c/p>\n\u003cp>\u003cb>Morgan Sung: \u003c/b>\u003cspan style=\"font-weight: 400\">One of the things we grapple a lot just like within Close All Tabs is where to draw the line with AI use, you know. And again, that’s complicated. What is AI? For example, we don’t use ChatGPT, but we use an AI transcription tool for our interviews. Are there conditions under which using large language models, AI tools, are reasonable or justified, appropriate? And then what’s your message to the average listener who maybe uses ChatGPT in their daily but they’re not necessarily AI boosters and not necessarily AI doomers. \u003c/span>\u003c/p>\n\u003cp>\u003cb>Emily M. Bender: \u003c/b>\u003cspan style=\"font-weight: 400\">Yeah. Um, so to the first question, I would say I never call it an AI transcription tool. I would say automatic transcription, right? And that is a use case where, you know, you want to look at the labor conditions of the people who produced it, where the training data come from. And it’s also a use case where you are well positioned to check the output and see if it’s working well for you, right. You’ve got something that has been recorded, you’ve got an automatically produced transcript, you’re presumably going through and correcting it. And if it is wrong all the time, or if you have one that is particularly bad for non-Anglo names, for example, you might start looking for something that’s better. So that is a case of automation that I think can be okay. You still want to look into who produced it. Are there privacy implications? Can I use this tool without uploading my data to somebody else and so on? 
But there’s reasonable uses and reasonable ways to produce automatic transcription. \u003c/span>\u003c/p>\n\u003cp>\u003cspan style=\"font-weight: 400\">If we’re talking about chat bots of the form of ChatGPT, I don’t see reasonable use cases there. And partially we know that the labor and environmental costs are extraordinarily high, that this is not produced ethically. But even setting that aside, every time you turn to ChatGPT for information, you’re cutting yourself off from important sense-making. \u003c/span>\u003c/p>\n\u003cp>\u003cspan style=\"font-weight: 400\">One of the examples I like to use, if you think about an old fashioned search engine that gave you back, you know, the 10 blue links and you’ve got a medical query, what might come back in those links is a link to, you know something like the Mayo Clinic and then your regional university medical center, so in the Bay area, you know UCSF. And you might get a link to Dr. Oz’s page and you might get a link to a discussion forum where people with the same medical questions are talking to each other. And you can then look at those and understand the information that’s there based on what you know about the Mayo Clinic and UCSF and Dr. Oz and discussion forums. But that also helps you continue to update what you know, about those kinds of sites. \u003c/span>\u003c/p>\n\u003cp>\u003cspan style=\"font-weight: 400\">Whereas if you asked a chatbot and you got back something that was just sort of some paper mache made up out of some combination of what’s in those sites, you not only don’t know how to contextualize what you’ve seen, but you’re also cut off from ability to continue to understand the information environment. And then very importantly, if you think about that discussion forum, any given, you know, sentence from that discussion forum interpreted as information, you’re going to want to take with a big grain of salt. 
But the chance to connect with people who are going through the same medical journey is priceless. And there’s a, the scholar Chris Gilliard describes these technologies as technologies of isolation. And I think it’s really important to think about anytime you might turn to a chat bot- what would you have done three years ago? What would you have done when ChatGPT was not in your world and what are you missing out on by not doing that now? The connections that you would make with people, the ongoing maintenance of relationships, the building of community, the deeper sense of what’s going on in the world around you, all of these are precious and I think not to be thrown away for the semblance of convenience. \u003c/span>\u003c/p>\n\u003cp>\u003cspan style=\"font-weight: 400\">And then I think the final thing that I would say is look out for, identify, and reject the inevitability narrative. So the tech companies would like us to believe that AI is the future, it’s definitely coming. Even if you don’t like it, you have to resign yourself to it. And you’ll get people saying, well, it’s here to stay, we have to learn what to live with it. And I refuse that. I say that is also a bid to steal our agency because the future is not written. \u003c/span>\u003c/p>\n\u003cp>\u003cb>Morgan Sung: \u003c/b>\u003cspan style=\"font-weight: 400\">Those are all my questions. Thank you so much for joining us.\u003cbr>\n\u003c/span>\u003cb>\u003c/b>\u003c/p>\n\u003cp>\u003cb>Emily M. Bender: \u003c/b>\u003cspan style=\"font-weight: 400\">Yeah, thank you. \u003c/span>\u003c/p>\n\u003cp>\u003cb>Alex Hanna: \u003c/b>\u003cspan style=\"font-weight: 400\">It was a pleasure. \u003c/span>\u003c/p>\n\u003cp>\u003cb>Morgan Sung: \u003c/b>\u003cspan style=\"font-weight: 400\">Let’s close all of these tabs. \u003c/span>\u003c/p>\n\u003cp>\u003cspan style=\"font-weight: 400\">Close All Tabs is a production of KQED studios and is reported and hosted by me, Morgan Sung. 
This episode was produced by Chris Egusa and edited by Jen Chien. Close All tabs producer is Maya Cueva. Chris Egusa is our senior editor. Additional editing by Chris Hambrick and Jen Chien, who’s KQED’s director of podcasts. Original music, including our theme song and credits by Chris Egusa. Additional music by APM. Brendan Willard is our audio engineer. \u003c/span>\u003c/p>\n\u003cp>\u003cspan style=\"font-weight: 400\">Audience engagement support from Maha Sanad. Katie Sprenger is our podcast operations manager and Ethan Toven-Lindsey is our editor in chief. Some members of the KQED podcast team are represented by the Screen Actors Guild, American Federation of Television and Radio Artists, San Francisco, Northern California Local. \u003c/span>\u003c/p>\n\u003cp>\u003cspan style=\"font-weight: 400\">This episode’s keyboard sounds were submitted by my dad, Casey Sung, and recorded on his white and blue Epomaker Aula F99 keyboard with Greywood V3 switches and Cherry Profile PBT keycaps. \u003c/span>\u003c/p>\n\u003cp>\u003cspan style=\"font-weight: 400\">Okay, and I know it’s a podcast cliche, but if you like these deep dives and want us to keep making more, it would really help us out if you could rate and review us on Spotify, Apple Podcasts, or wherever you listen to the show. Follow us on Instagram at CloseAllTabsPod, or TikTok at Close All Tabs. Thanks for listening. \u003c/span>\u003c/p>\n\u003cp> \u003c/p>\n\u003cp>\u003c/p>\u003c/div>",
"attributes": {
"named": {},
"numeric": []
}
},
{
"type": "component",
"content": "",
"name": "ad",
"attributes": {
"named": {
"label": "floatright"
},
"numeric": [
"floatright"
]
}
},
{
"type": "contentString",
"content": "\u003cdiv class=\"post-body\">\u003cp>\u003c/p>\n\u003c/div>\u003c/p>",
"attributes": {
"named": {},
"numeric": []
}
}
],
"link": "/news/12059911/beyond-the-ai-hype-machine",
"authors": [
"11944",
"11943",
"11869",
"11832"
],
"programs": [
"news_35082"
],
"categories": [
"news_33520"
],
"tags": [
"news_25184",
"news_32664",
"news_34755",
"news_22973",
"news_3137",
"news_34646",
"news_1631"
],
"featImg": "news_12059912",
"label": "source_news_12059911"
},
"news_12059714": {
"type": "posts",
"id": "news_12059714",
"meta": {
"index": "posts_1716263798",
"site": "news",
"id": "12059714",
"score": null,
"sort": [
1760460055000
]
},
"guestAuthors": [],
"slug": "newsom-vetoes-most-watched-childrens-ai-bill-signs-16-others-targeting-tech",
"title": "Newsom Vetoes Most-Watched Children's AI Bill, Signs 16 Others Targeting Tech",
"publishDate": 1760460055,
"format": "audio",
"headTitle": "Newsom Vetoes Most-Watched Children’s AI Bill, Signs 16 Others Targeting Tech | KQED",
"labelTerm": {
"site": "news"
},
"content": "\u003cp>California Gov. \u003ca href=\"https://www.kqed.org/news/tag/gavin-newsom\">Gavin Newsom\u003c/a> vetoed legislation that would have prohibited developers from offering companion AI chatbots for children unless the companies can promise the software won’t encourage harmful behavior.\u003c/p>\n\u003cp>Sacramento players across the political spectrum watched the legislation closely, with advocates issuing press releases, open letters and \u003ca href=\"https://www.kqed.org/news/12038154/kids-talking-ai-companion-chatbots-stanford-researchers-say-thats-bad-idea\">research reports\u003c/a> in hopes of swaying California’s tech-friendly governor.\u003c/p>\n\u003cp>In his \u003ca href=\"https://www.gov.ca.gov/wp-content/uploads/2025/10/AB-1064-Veto.pdf\">veto message\u003c/a> published Monday, Newsom wrote that AB-1064 could lead to a total ban on minors using conversational AI tools. “AI is already shaping the world, and it is imperative that adolescents learn how to safely interact with AI systems,” Newsom wrote.\u003c/p>\n\u003cp>[ad fullwidth]\u003c/p>\n\u003cp>“We’re sorely disappointed to see Governor Newsom side with Big Tech over the more than 150 families who have suffered the most unimaginable loss: the passing of their child, encouraged by companion AI,” Assemblymember Rebecca Bauer-Kahan of Orinda, the bill’s author, wrote in a statement, noting the bill was sponsored by Common Sense Media, California Attorney General Rob Bonta, and more than 20 organizations.\u003c/p>\n\u003cp>“These AI companies know the risks their products pose. 
They’ve made purposeful design decisions that put kids in harm’s way, creating chatbots that form dangerous emotional bonds with vulnerable young people,” she added.\u003c/p>\n\u003cfigure id=\"attachment_11917730\" class=\"wp-caption aligncenter\" style=\"max-width: 1920px\">\u003cimg loading=\"lazy\" decoding=\"async\" class=\"size-full wp-image-11917730\" src=\"https://cdn.kqed.org/wp-content/uploads/sites/10/2022/06/RS56739_IMG_4228-qut.jpg\" alt=\"\" width=\"1920\" height=\"1440\" srcset=\"https://cdn.kqed.org/wp-content/uploads/sites/10/2022/06/RS56739_IMG_4228-qut.jpg 1920w, https://cdn.kqed.org/wp-content/uploads/sites/10/2022/06/RS56739_IMG_4228-qut-800x600.jpg 800w, https://cdn.kqed.org/wp-content/uploads/sites/10/2022/06/RS56739_IMG_4228-qut-1020x765.jpg 1020w, https://cdn.kqed.org/wp-content/uploads/sites/10/2022/06/RS56739_IMG_4228-qut-160x120.jpg 160w, https://cdn.kqed.org/wp-content/uploads/sites/10/2022/06/RS56739_IMG_4228-qut-1536x1152.jpg 1536w\" sizes=\"auto, (max-width: 1920px) 100vw, 1920px\">\u003cfigcaption class=\"wp-caption-text\">Assemblywoman Rebecca Bauer-Kahan, D-Orinda, on Political Breakdown. \u003ccite>(Guy Marzorati/KQED)\u003c/cite>\u003c/figcaption>\u003c/figure>\n\u003cp>“We’ve seen suicides lately. We’ve seen all sorts of mental health disruptions caused by AI companions. That said, the fight is just beginning,” Common Sense Media CEO Jim Steyer told KQED. 
“California is clearly leading the way in the United States and globally on these issues, and the next year or two are going to be absolutely critical in defining regulations, guardrails and a common sense future for the big tech industry.”\u003c/p>\n\u003cp>The trade group TechNet lobbied heavily against the bill, \u003ca href=\"https://www.technet.org/the-impact-of-ca-ab-1064/\">running ads\u003c/a> that warned that Bauer-Kahan’s bill could deny children access to critical tools they need to succeed.\u003c/p>\n\u003cp>“We appreciate Governor Newsom’s thoughtful consideration and ultimate veto of this proposed legislation,” wrote Robert Boykin, TechNet’s Executive Director for California and the Southwest. “While TechNet shares the goal of AB 1064, the bill fails to meet its stated objectives while threatening students’ access to valuable AI-driven learning tools, potentially life-saving medical treatments, crisis response interventions, safety mechanisms, and other valuable AI technologies.”[aside postID=news_12059209 hero='https://cdn.kqed.org/wp-content/uploads/sites/10/2025/10/251008_ENDOF10_-9-KQED.jpg']The host of AI-related bills that made it to Newsom’s desk this legislative session presented him with a political \u003ca href=\"https://www.kqed.org/news/12052617/newsoms-tightrope-walk-between-ai-regulation-and-silicon-valley-cash\">balancing act\u003c/a>, as he eyes a \u003ca href=\"https://www.kqed.org/news/12043766/newsom-tries-to-find-political-footing-in-clash-with-trump\">run for the White House\u003c/a>. 
Many of the bills were opposed by trade associations heavily bankrolled by Silicon Valley, and California is home to \u003ca href=\"https://www.forbes.com/lists/ai50/\">32 of the 50 top AI companies\u003c/a> worldwide.\u003c/p>\n\u003cp>As if anticipating the blowback from child safety advocates, Newsom’s office released \u003ca href=\"https://www.gov.ca.gov/2025/10/13/governor-newsom-signs-bills-to-further-strengthen-californias-leadership-in-protecting-children-online/\">a list of 16 AI bills\u003c/a> he approved this session, some focused on children. “California has long stood as a bold leader in protecting children from the danger of emerging technology,” the statement read.\u003c/p>\n\u003cp>On the list: SB 243 by Sen. Steve Padilla, D-San Diego, which placed softer limits on AI chatbots for kids. Advocacy groups, including Common Sense Media and \u003ca href=\"https://techoversight.org/wp-content/uploads/2025/09/SB-243-Remove-Support.pdf\">Tech Oversight California\u003c/a>, pulled their support from the bill in mid-September, arguing industry-friendly amendments weakened it and could establish a “dangerous” precedent for other states and countries taking California’s lead on AI regulation.\u003c/p>\n\u003cp>In a similar vein, Newsom signed an \u003ca href=\"https://www.kqed.org/news/12058013/newsom-signs-california-ai-transparency-bill-tailored-to-meet-tech-industry-tastes\">industry-friendly version\u003c/a> of SB-53 by Sen. Scott Wiener (D-San Francisco), after his original effort became target No. 
1 for Silicon Valley lobbyists\u003ca href=\"https://www.kqed.org/news/12007323/can-california-still-lead-on-ai-regulation-following-newsoms-veto-of-ai-safety-bill\"> last legislative session\u003c/a> and died on Newsom’s desk.\u003c/p>\n\u003cp>But not all the bills Newsom signed this legislative session lack teeth.\u003c/p>\n\u003cp>AB 621, for instance, expands the ability of deepfake pornography victims to sue anyone who creates, digitally alters, or distributes a sexually explicit image or video in which they appear to engage in sexual conduct without their consent. The expanded private right of action is considered a notable strength when most other AI bills rely on regulatory enforcement, penalties, or agency reporting to sway business practices.\u003c/p>\n\u003cp>Steyer said he was happy to see Newsom’s signature on AB 56, which supporters say will require first-in-the-nation warning labels on social media, similar to what California has mandated on packaging for alcohol and cigarettes.\u003c/p>\n\u003cp>“It’s clear that Gov. Newsom, and also the first partner, Jennifer Siebel Newsom, who’s heavily involved in all this legislation, have listened to parents, and advocacy groups around the state,” Steyer said.\u003c/p>\n\u003cp>\u003c/p>\n",
"blocks": [],
"excerpt": "The legislation would have restricted children’s access to AI chatbots, but Newsom sided with the tech industry, saying it would have led to a total ban on some AI tools for adolescents.",
"status": "publish",
"parent": 0,
"modified": 1760654510,
"stats": {
"hasAudio": false,
"hasVideo": false,
"hasChartOrMap": false,
"iframeSrcs": [],
"hasGoogleForm": false,
"hasGallery": false,
"hasHearkenModule": false,
"hasPolis": false,
"paragraphCount": 17,
"wordCount": 810
},
"headData": {
"title": "Newsom Vetoes Most-Watched Children's AI Bill, Signs 16 Others Targeting Tech | KQED",
"description": "The legislation would have restricted children’s access to AI chatbots, but Newsom sided with the tech industry, saying it would have led to a total ban on some AI tools for adolescents.",
"ogTitle": "",
"ogDescription": "",
"ogImgId": "",
"twTitle": "",
"twDescription": "",
"twImgId": "",
"schema": {
"@context": "https://schema.org",
"@type": "NewsArticle",
"headline": "Newsom Vetoes Most-Watched Children's AI Bill, Signs 16 Others Targeting Tech",
"datePublished": "2025-10-14T09:40:55-07:00",
"dateModified": "2025-10-16T15:41:50-07:00",
"image": "https://cdn.kqed.org/wp-content/uploads/2020/02/KQED-OG-Image@1x.png",
"isAccessibleForFree": "True",
"publisher": {
"@type": "NewsMediaOrganization",
"@id": "https://www.kqed.org/#organization",
"name": "KQED",
"logo": "https://cdn.kqed.org/wp-content/uploads/2020/02/KQED-OG-Image@1x.png",
"url": "https://www.kqed.org",
"sameAs": [
"https://www.facebook.com/KQED",
"https://twitter.com/KQED",
"https://www.instagram.com/kqed/",
"https://www.tiktok.com/@kqedofficial",
"https://www.linkedin.com/company/kqed",
"https://www.youtube.com/channel/UCeC0IOo7i1P_61zVUWbJ4nw"
]
}
}
},
"primaryCategory": {
"termId": 13,
"slug": "politics",
"name": "Politics"
},
"audioUrl": "https://traffic.omny.fm/d/clips/0af137ef-751e-4b19-a055-aaef00d2d578/ffca7e9f-6831-4[…]f-aaef00f5a073/0c260676-5389-476a-a2ae-b377000ac4db/audio.mp3",
"sticky": false,
"nprStoryId": "kqed-12059714",
"templateType": "standard",
"featuredImageType": "standard",
"excludeFromSiteSearch": "Include",
"articleAge": "0",
"path": "/news/12059714/newsom-vetoes-most-watched-childrens-ai-bill-signs-16-others-targeting-tech",
"audioTrackLength": null,
"parsedContent": [
{
"type": "contentString",
"content": "\u003cdiv class=\"post-body\">\u003cp>\u003cp>California Gov. \u003ca href=\"https://www.kqed.org/news/tag/gavin-newsom\">Gavin Newsom\u003c/a> vetoed legislation that would have prohibited developers from offering companion AI chatbots for children unless the companies can promise the software won’t encourage harmful behavior.\u003c/p>\n\u003cp>Sacramento players across the political spectrum watched the legislation closely, with advocates issuing press releases, open letters and \u003ca href=\"https://www.kqed.org/news/12038154/kids-talking-ai-companion-chatbots-stanford-researchers-say-thats-bad-idea\">research reports\u003c/a> in hopes of swaying California’s tech-friendly governor.\u003c/p>\n\u003cp>In his \u003ca href=\"https://www.gov.ca.gov/wp-content/uploads/2025/10/AB-1064-Veto.pdf\">veto message\u003c/a> published Monday, Newsom wrote that AB-1064 could lead to a total ban on minors using conversational AI tools. “AI is already shaping the world, and it is imperative that adolescents learn how to safely interact with AI systems,” Newsom wrote.\u003c/p>\n\u003cp>\u003c/p>\u003c/div>",
"attributes": {
"named": {},
"numeric": []
}
},
{
"type": "component",
"content": "",
"name": "ad",
"attributes": {
"named": {
"label": "fullwidth"
},
"numeric": [
"fullwidth"
]
}
},
{
"type": "contentString",
"content": "\u003cdiv class=\"post-body\">\u003cp>\u003c/p>\n\u003cp>“We’re sorely disappointed to see Governor Newsom side with Big Tech over the more than 150 families who have suffered the most unimaginable loss: the passing of their child, encouraged by companion AI,” Assemblymember Rebecca Bauer-Kahan of Orinda, the bill’s author, wrote in a statement, noting the bill was sponsored by Common Sense Media, California Attorney General Rob Bonta, and more than 20 organizations.\u003c/p>\n\u003cp>“These AI companies know the risks their products pose. They’ve made purposeful design decisions that put kids in harm’s way, creating chatbots that form dangerous emotional bonds with vulnerable young people,” she added.\u003c/p>\n\u003cfigure id=\"attachment_11917730\" class=\"wp-caption aligncenter\" style=\"max-width: 1920px\">\u003cimg loading=\"lazy\" decoding=\"async\" class=\"size-full wp-image-11917730\" src=\"https://cdn.kqed.org/wp-content/uploads/sites/10/2022/06/RS56739_IMG_4228-qut.jpg\" alt=\"\" width=\"1920\" height=\"1440\" srcset=\"https://cdn.kqed.org/wp-content/uploads/sites/10/2022/06/RS56739_IMG_4228-qut.jpg 1920w, https://cdn.kqed.org/wp-content/uploads/sites/10/2022/06/RS56739_IMG_4228-qut-800x600.jpg 800w, https://cdn.kqed.org/wp-content/uploads/sites/10/2022/06/RS56739_IMG_4228-qut-1020x765.jpg 1020w, https://cdn.kqed.org/wp-content/uploads/sites/10/2022/06/RS56739_IMG_4228-qut-160x120.jpg 160w, https://cdn.kqed.org/wp-content/uploads/sites/10/2022/06/RS56739_IMG_4228-qut-1536x1152.jpg 1536w\" sizes=\"auto, (max-width: 1920px) 100vw, 1920px\">\u003cfigcaption class=\"wp-caption-text\">Assemblywoman Rebecca Bauer-Kahan, D-Orinda, on Political Breakdown. \u003ccite>(Guy Marzorati/KQED)\u003c/cite>\u003c/figcaption>\u003c/figure>\n\u003cp>“We’ve seen suicides lately. We’ve seen all sorts of mental health disruptions caused by AI companions. That said, the fight is just beginning,” Common Sense Media CEO Jim Steyer told KQED. 
“California is clearly leading the way in the United States and globally on these issues, and the next year or two are going to be absolutely critical in defining regulations, guardrails and a common sense future for the big tech industry.”\u003c/p>\n\u003cp>The trade group TechNet lobbied heavily against the bill, \u003ca href=\"https://www.technet.org/the-impact-of-ca-ab-1064/\">running ads\u003c/a> that warned that Bauer-Kahan’s bill could deny children access to critical tools they need to succeed.\u003c/p>\n\u003cp>“We appreciate Governor Newsom’s thoughtful consideration and ultimate veto of this proposed legislation,” wrote Robert Boykin, TechNet’s Executive Director for California and the Southwest. “While TechNet shares the goal of AB 1064, the bill fails to meet its stated objectives while threatening students’ access to valuable AI-driven learning tools, potentially life-saving medical treatments, crisis response interventions, safety mechanisms, and other valuable AI technologies.”\u003c/p>\u003c/div>",
"attributes": {
"named": {},
"numeric": []
}
},
{
"type": "component",
"content": "",
"name": "aside",
"attributes": {
"named": {
"postid": "news_12059209",
"hero": "https://cdn.kqed.org/wp-content/uploads/sites/10/2025/10/251008_ENDOF10_-9-KQED.jpg",
"label": ""
},
"numeric": []
}
},
{
"type": "contentString",
"content": "\u003cdiv class=\"post-body\">\u003cp>The host of AI-related bills that made it to Newsom’s desk this legislative session presented him with a political \u003ca href=\"https://www.kqed.org/news/12052617/newsoms-tightrope-walk-between-ai-regulation-and-silicon-valley-cash\">balancing act\u003c/a>, as he eyes a \u003ca href=\"https://www.kqed.org/news/12043766/newsom-tries-to-find-political-footing-in-clash-with-trump\">run for the White House\u003c/a>. Many of the bills were opposed by trade associations heavily bankrolled by Silicon Valley, and California is home to \u003ca href=\"https://www.forbes.com/lists/ai50/\">32 of the 50 top AI companies\u003c/a> worldwide.\u003c/p>\n\u003cp>As if anticipating the blowback from child safety advocates, Newsom’s office released \u003ca href=\"https://www.gov.ca.gov/2025/10/13/governor-newsom-signs-bills-to-further-strengthen-californias-leadership-in-protecting-children-online/\">a list of 16 AI bills\u003c/a> he approved this session, some focused on children. “California has long stood as a bold leader in protecting children from the danger of emerging technology,” the statement read.\u003c/p>\n\u003cp>On the list: SB 243 by Sen. Steve Padilla, D-San Diego, which placed softer limits on AI chatbots for kids. Advocacy groups, including Common Sense Media and \u003ca href=\"https://techoversight.org/wp-content/uploads/2025/09/SB-243-Remove-Support.pdf\">Tech Oversight California\u003c/a>, pulled their support from the bill in mid-September, arguing industry-friendly amendments weakened it and could establish a “dangerous” precedent for other states and countries taking California’s lead on AI regulation.\u003c/p>\n\u003cp>In a similar vein, Newsom signed an \u003ca href=\"https://www.kqed.org/news/12058013/newsom-signs-california-ai-transparency-bill-tailored-to-meet-tech-industry-tastes\">industry-friendly version\u003c/a> of SB-53 by Sen. 
Scott Wiener (D-San Francisco), after his original effort became target No. 1 for Silicon Valley lobbyists\u003ca href=\"https://www.kqed.org/news/12007323/can-california-still-lead-on-ai-regulation-following-newsoms-veto-of-ai-safety-bill\"> last legislative session\u003c/a> and died on Newsom’s desk.\u003c/p>\n\u003cp>But not all the bills Newsom signed this legislative session lack teeth.\u003c/p>\n\u003cp>AB 621, for instance, expands the ability of deepfake pornography victims to sue anyone who creates, digitally alters, or distributes a sexually explicit image or video in which they appear to engage in sexual conduct without their consent. The expanded private right of action is considered a notable strength when most other AI bills rely on regulatory enforcement, penalties, or agency reporting to sway business practices.\u003c/p>\n\u003cp>Steyer said he was happy to see Newsom’s signature on AB 56, which supporters say will require first-in-the-nation warning labels on social media, similar to what California has mandated on packaging for alcohol and cigarettes.\u003c/p>\n\u003cp>“It’s clear that Gov. Newsom, and also the first partner, Jennifer Siebel Newsom, who’s heavily involved in all this legislation, have listened to parents, and advocacy groups around the state,” Steyer said.\u003c/p>\n\u003cp>\u003c/p>\n\u003c/div>\u003c/p>",
"attributes": {
"named": {},
"numeric": []
}
}
],
"link": "/news/12059714/newsom-vetoes-most-watched-childrens-ai-bill-signs-16-others-targeting-tech",
"authors": [
"251"
],
"categories": [
"news_31795",
"news_8",
"news_13",
"news_248"
],
"tags": [
"news_25184",
"news_32664",
"news_34755",
"news_18538",
"news_22307",
"news_32668",
"news_30826",
"news_16",
"news_34532",
"news_34586",
"news_21285",
"news_1631",
"news_20385"
],
"featImg": "news_12051437",
"label": "news"
},
"news_12058013": {
"type": "posts",
"id": "news_12058013",
"meta": {
"index": "posts_1716263798",
"site": "news",
"id": "12058013",
"score": null,
"sort": [
1759183178000
]
},
"guestAuthors": [],
"slug": "newsom-signs-california-ai-transparency-bill-tailored-to-meet-tech-industry-tastes",
"title": "Newsom Signs California AI Transparency Bill Tailored to Meet Tech Industry Tastes",
"publishDate": 1759183178,
"format": "standard",
"headTitle": "Newsom Signs California AI Transparency Bill Tailored to Meet Tech Industry Tastes | KQED",
"labelTerm": {
"site": "news"
},
"content": "\u003cp>Gov. Gavin Newsom today \u003ca href=\"https://www.gov.ca.gov/2025/09/29/governor-newsom-signs-sb-53-advancing-californias-world-leading-artificial-intelligence-industry/\">signed\u003c/a> into law\u003ca href=\"https://leginfo.legislature.ca.gov/faces/billNavClient.xhtml?bill_id=202520260SB53\"> Senate Bill 53\u003c/a>, which would require large model developers like Anthropic and Open AI to be transparent about safety measures they put in place to prevent catastrophic events. The legislation would also create CalCompute, a public cloud infrastructure that expands access to AI resources for researchers, startups and public institutions.\u003c/p>\n\u003cp>In announcing his decision, Newsom wrote, “California has proven that we can establish regulations to protect our communities while also ensuring that the growing AI industry continues to thrive. This legislation strikes that balance.”\u003c/p>\n\u003cp>Senator Scott Wiener (D-San Francisco) authored the bill, after his original effort became target No. 1 for Silicon Valley lobbyists\u003ca href=\"https://www.kqed.org/news/12007323/can-california-still-lead-on-ai-regulation-following-newsoms-veto-of-ai-safety-bill\"> last legislative session\u003c/a> and died on Newsom’s desk. That bill spooked high-profile California politicians, including\u003ca href=\"https://www.kqed.org/news/12002254/california-bill-to-regulate-catastrophic-effects-of-ai-heads-to-newsoms-desk\"> Nancy Pelosi,\u003c/a> nervous about getting on the wrong side of Big Tech. 
In last year’s veto message for SB 1047, Newsom announced a working group on AI, which helped lay the groundwork for \u003ca href=\"https://www.kqed.org/news/12020857/california-lawmaker-ready-revive-fight-regulating-ai\">SB 53\u003c/a>.\u003c/p>\n\u003cp>[ad fullwidth]\u003c/p>\n\u003cp>“With a technology as transformative as AI, we have a responsibility to support that innovation while putting in place commonsense guardrails to understand and reduce risk,” \u003ca href=\"https://sd11.senate.ca.gov/news/governor-newsom-signs-senator-wieners-landmark-ai-law-set-commonsense-guardrails-boost\">wrote\u003c/a> Wiener. “I’m grateful to the Governor for his leadership in convening the Joint California AI Policy Working Group, working with us to refine the legislation, and now signing it into law.”\u003c/p>\n\u003cp>The working group issued its\u003ca href=\"https://www.gov.ca.gov/wp-content/uploads/2025/06/June-17-2025-%E2%80%93-The-California-Report-on-Frontier-AI-Policy.pdf\"> report\u003c/a> in June, calling on lawmakers to pass transparency requirements and whistleblower protections, declaring that California has the “responsibility” to ensure the safety of generative artificial intelligence software, “so that their benefit to society can be realized.”\u003c/p>\n\u003cfigure id=\"attachment_12058035\" class=\"wp-caption aligncenter\" style=\"max-width: 2560px\">\u003ca href=\"https://cdn.kqed.org/wp-content/uploads/sites/10/2025/09/GettyImages-2159671948-scaled.jpg\">\u003cimg loading=\"lazy\" decoding=\"async\" class=\"size-full wp-image-12058035\" src=\"https://cdn.kqed.org/wp-content/uploads/sites/10/2025/09/GettyImages-2159671948-scaled.jpg\" alt=\"\" width=\"2560\" height=\"1707\" srcset=\"https://cdn.kqed.org/wp-content/uploads/sites/10/2025/09/GettyImages-2159671948-scaled.jpg 2560w, https://cdn.kqed.org/wp-content/uploads/sites/10/2025/09/GettyImages-2159671948-2000x1333.jpg 2000w, 
https://cdn.kqed.org/wp-content/uploads/sites/10/2025/09/GettyImages-2159671948-160x107.jpg 160w, https://cdn.kqed.org/wp-content/uploads/sites/10/2025/09/GettyImages-2159671948-1536x1024.jpg 1536w, https://cdn.kqed.org/wp-content/uploads/sites/10/2025/09/GettyImages-2159671948-2048x1365.jpg 2048w\" sizes=\"auto, (max-width: 2560px) 100vw, 2560px\">\u003c/a>\u003cfigcaption class=\"wp-caption-text\">Close-up of phone screen displaying Anthropic Claude, a Large Language Model (LLM) powered generative artificial intelligence chatbot, in Lafayette, California, June 27, 2024. \u003ccite>(Photo by Smith Collection/Gado/Getty Images)\u003c/cite>\u003c/figcaption>\u003c/figure>\n\u003cp>The report noted that AI systems have been observed finding loopholes that allow them to behave in ways their programmers did not intend. Also, that competitive pressures are undermining safety, and policy intervention is needed to prevent a race to the bottom.\u003c/p>\n\u003cp>Anthropic, which makes the chatbot Claude, was the first major AI developer to endorse SB 53, having offered more cautious support for SB 1047. “We’re proud to have worked with Senator Wiener to help bring industry to the table and develop practical safeguards that create real accountability for how powerful AI systems are developed and deployed, which will in turn keep everyone safer as the rapid acceleration of AI capabilities continues,” wrote Jack Clark, co-founder and head of policy for Anthropic.[aside postID=news_12052617 hero='https://cdn.kqed.org/wp-content/uploads/sites/10/2025/08/GETTYIMAGES-2228237489-KQED.jpg']Federal lawmakers on both sides of the aisle have historically taken a relatively\u003ca href=\"https://www.kqed.org/news/11905230/do-federal-lawmakers-have-the-stomach-to-rein-in-big-tech\"> light touch\u003c/a> toward regulating the technology industry. 
Despite high-drama hearings about troubling trends in social media and now AI, few bills make it out of their respective committees, let alone to a floor vote. “While federal standards remain essential to avoid a patchwork of state regulations, California has created a strong framework that balances public safety with continued innovation,” Clark added.\u003c/p>\n\u003cp>This time around, other AI developers got behind Wiener’s effort. “Meta supports balanced AI regulation and the California Frontier AI law is a positive step in that direction,” a spokesperson for Meta wrote in a statement.\u003c/p>\n\u003cp>Earlier this year, a coalition of more than 20 tech and youth safety advocacy organizations\u003ca href=\"https://encodeai.org/wp-content/uploads/2025/09/SB-53-Coalition-Letter-9_24_2025.pdf\"> sent a letter\u003c/a> to Gov. Newsom in support of SB 53. “If basic guardrails like this had existed at the inception of social media, our children could be living in a safer, healthier world,” the letter said.\u003c/p>\n\u003cp>“We are incredibly proud to have worked with Senator Wiener and Governor Newsom on this AI safety legislation,” wrote Sneha Revanur, founder of Encode AI, a youth-led nonprofit that pushes for responsible AI through policy. The group was one of the primary drivers behind that coalition. “Frontier AI models have immense potential but without proper oversight, they can create real risks and harms. California has shown it’s possible to lead on AI safety without stifling progress.”\u003c/p>\n\u003cp>The bill was opposed by business and industry representatives, including the California Chamber of Commerce, TechNet and the Silicon Valley Leadership Group.\u003c/p>\n\u003cp>“It’s vital that we strengthen California’s role as the global leader in AI and the epicenter of innovation. 
SVLG is committed to advocating for policies that seek to responsibly scale this transformative technology at this pivotal juncture and to unleash a new wave of innovation and growth,” Ahmad Thomas, CEO of Silicon Valley Leadership Group, wrote in a statement. “We will continue to work with the Governor and leaders in the Legislature to ensure that new laws and regulations don’t impose undue burdens on the most innovative companies in the world.”\u003c/p>\n\u003cp> \u003c/p>\n\u003cp>\u003c/p>\n",
"blocks": [],
"excerpt": "Gov. Gavin Newsom signed State Senator Scott Wiener’s SB 53, which aims to put safety guardrails on AI development while not squashing the growing AI industry. ",
"status": "publish",
"parent": 0,
"modified": 1759246357,
"stats": {
"hasAudio": false,
"hasVideo": false,
"hasChartOrMap": false,
"iframeSrcs": [],
"hasGoogleForm": false,
"hasGallery": false,
"hasHearkenModule": false,
"hasPolis": false,
"paragraphCount": 14,
"wordCount": 838
},
"headData": {
"title": "Newsom Signs California AI Transparency Bill Tailored to Meet Tech Industry Tastes | KQED",
"description": "Gov. Gavin Newsom signed State Senator Scott Wiener’s SB 53, which aims to put safety guardrails on AI development while not squashing the growing AI industry. ",
"ogTitle": "",
"ogDescription": "",
"ogImgId": "",
"twTitle": "",
"twDescription": "",
"twImgId": "",
"schema": {
"@context": "https://schema.org",
"@type": "NewsArticle",
"headline": "Newsom Signs California AI Transparency Bill Tailored to Meet Tech Industry Tastes",
"datePublished": "2025-09-29T14:59:38-07:00",
"dateModified": "2025-09-30T08:32:37-07:00",
"image": "https://cdn.kqed.org/wp-content/uploads/2020/02/KQED-OG-Image@1x.png",
"isAccessibleForFree": "True",
"publisher": {
"@type": "NewsMediaOrganization",
"@id": "https://www.kqed.org/#organization",
"name": "KQED",
"logo": "https://cdn.kqed.org/wp-content/uploads/2020/02/KQED-OG-Image@1x.png",
"url": "https://www.kqed.org",
"sameAs": [
"https://www.facebook.com/KQED",
"https://twitter.com/KQED",
"https://www.instagram.com/kqed/",
"https://www.tiktok.com/@kqedofficial",
"https://www.linkedin.com/company/kqed",
"https://www.youtube.com/channel/UCeC0IOo7i1P_61zVUWbJ4nw"
]
}
}
},
"primaryCategory": {
"termId": 8,
"slug": "news",
"name": "News"
},
"sticky": false,
"nprStoryId": "kqed-12058013",
"templateType": "standard",
"featuredImageType": "standard",
"excludeFromSiteSearch": "Include",
"articleAge": "0",
"path": "/news/12058013/newsom-signs-california-ai-transparency-bill-tailored-to-meet-tech-industry-tastes",
"audioTrackLength": null,
"parsedContent": [
{
"type": "contentString",
"content": "\u003cdiv class=\"post-body\">\u003cp>\u003cp>Gov. Gavin Newsom today \u003ca href=\"https://www.gov.ca.gov/2025/09/29/governor-newsom-signs-sb-53-advancing-californias-world-leading-artificial-intelligence-industry/\">signed\u003c/a> into law\u003ca href=\"https://leginfo.legislature.ca.gov/faces/billNavClient.xhtml?bill_id=202520260SB53\"> Senate Bill 53\u003c/a>, which would require large model developers like Anthropic and Open AI to be transparent about safety measures they put in place to prevent catastrophic events. The legislation would also create CalCompute, a public cloud infrastructure that expands access to AI resources for researchers, startups and public institutions.\u003c/p>\n\u003cp>In announcing his decision, Newsom wrote, “California has proven that we can establish regulations to protect our communities while also ensuring that the growing AI industry continues to thrive. This legislation strikes that balance.”\u003c/p>\n\u003cp>Senator Scott Wiener (D-San Francisco) authored the bill, after his original effort became target No. 1 for Silicon Valley lobbyists\u003ca href=\"https://www.kqed.org/news/12007323/can-california-still-lead-on-ai-regulation-following-newsoms-veto-of-ai-safety-bill\"> last legislative session\u003c/a> and died on Newsom’s desk. That bill spooked high-profile California politicians, including\u003ca href=\"https://www.kqed.org/news/12002254/california-bill-to-regulate-catastrophic-effects-of-ai-heads-to-newsoms-desk\"> Nancy Pelosi,\u003c/a> nervous about getting on the wrong side of Big Tech. In last year’s veto message for SB 1047, Newsom announced a working group on AI, which helped lay the groundwork for \u003ca href=\"https://www.kqed.org/news/12020857/california-lawmaker-ready-revive-fight-regulating-ai\">SB 53\u003c/a>.\u003c/p>\n\u003cp>\u003c/p>\u003c/div>",
"attributes": {
"named": {},
"numeric": []
}
},
{
"type": "component",
"content": "",
"name": "ad",
"attributes": {
"named": {
"label": "fullwidth"
},
"numeric": [
"fullwidth"
]
}
},
{
"type": "contentString",
"content": "\u003cdiv class=\"post-body\">\u003cp>\u003c/p>\n\u003cp>“With a technology as transformative as AI, we have a responsibility to support that innovation while putting in place commonsense guardrails to understand and reduce risk,” \u003ca href=\"https://sd11.senate.ca.gov/news/governor-newsom-signs-senator-wieners-landmark-ai-law-set-commonsense-guardrails-boost\">wrote\u003c/a> Wiener. “I’m grateful to the Governor for his leadership in convening the Joint California AI Policy Working Group, working with us to refine the legislation, and now signing it into law.”\u003c/p>\n\u003cp>The working group issued its\u003ca href=\"https://www.gov.ca.gov/wp-content/uploads/2025/06/June-17-2025-%E2%80%93-The-California-Report-on-Frontier-AI-Policy.pdf\"> report\u003c/a> in June, calling on lawmakers to pass transparency requirements and whistleblower protections, declaring that California has the “responsibility” to ensure the safety of generative artificial intelligence software, “so that their benefit to society can be realized.”\u003c/p>\n\u003cfigure id=\"attachment_12058035\" class=\"wp-caption aligncenter\" style=\"max-width: 2560px\">\u003ca href=\"https://cdn.kqed.org/wp-content/uploads/sites/10/2025/09/GettyImages-2159671948-scaled.jpg\">\u003cimg loading=\"lazy\" decoding=\"async\" class=\"size-full wp-image-12058035\" src=\"https://cdn.kqed.org/wp-content/uploads/sites/10/2025/09/GettyImages-2159671948-scaled.jpg\" alt=\"\" width=\"2560\" height=\"1707\" srcset=\"https://cdn.kqed.org/wp-content/uploads/sites/10/2025/09/GettyImages-2159671948-scaled.jpg 2560w, https://cdn.kqed.org/wp-content/uploads/sites/10/2025/09/GettyImages-2159671948-2000x1333.jpg 2000w, https://cdn.kqed.org/wp-content/uploads/sites/10/2025/09/GettyImages-2159671948-160x107.jpg 160w, https://cdn.kqed.org/wp-content/uploads/sites/10/2025/09/GettyImages-2159671948-1536x1024.jpg 1536w, https://cdn.kqed.org/wp-content/uploads/sites/10/2025/09/GettyImages-2159671948-2048x1365.jpg 
2048w\" sizes=\"auto, (max-width: 2560px) 100vw, 2560px\">\u003c/a>\u003cfigcaption class=\"wp-caption-text\">Close-up of phone screen displaying Anthropic Claude, a Large Language Model (LLM) powered generative artificial intelligence chatbot, in Lafayette, California, June 27, 2024. \u003ccite>(Photo by Smith Collection/Gado/Getty Images)\u003c/cite>\u003c/figcaption>\u003c/figure>\n\u003cp>The report noted that AI systems have been observed finding loopholes that allow them to behave in ways their programmers did not intend. Also, that competitive pressures are undermining safety, and policy intervention is needed to prevent a race to the bottom.\u003c/p>\n\u003cp>Anthropic, which makes the chatbot Claude, was the first major AI developer to endorse SB 53, having offered more cautious support for SB 1047. “We’re proud to have worked with Senator Wiener to help bring industry to the table and develop practical safeguards that create real accountability for how powerful AI systems are developed and deployed, which will in turn keep everyone safer as the rapid acceleration of AI capabilities continues,” wrote Jack Clark, co-founder and head of policy for Anthropic.\u003c/p>\u003c/div>",
"attributes": {
"named": {},
"numeric": []
}
},
{
"type": "component",
"content": "",
"name": "aside",
"attributes": {
"named": {
"postid": "news_12052617",
"hero": "https://cdn.kqed.org/wp-content/uploads/sites/10/2025/08/GETTYIMAGES-2228237489-KQED.jpg",
"label": ""
},
"numeric": []
}
},
{
"type": "contentString",
"content": "\u003cdiv class=\"post-body\">\u003cp>Federal lawmakers on both sides of the aisle have historically taken a relatively\u003ca href=\"https://www.kqed.org/news/11905230/do-federal-lawmakers-have-the-stomach-to-rein-in-big-tech\"> light touch\u003c/a> toward regulating the technology industry. Despite high-drama hearings about troubling trends in social media and now AI, few bills make it out of their respective committees, let alone to a floor vote. “While federal standards remain essential to avoid a patchwork of state regulations, California has created a strong framework that balances public safety with continued innovation,” Clark added.\u003c/p>\n\u003cp>This time around, other AI developers got behind Wiener’s effort. “Meta supports balanced AI regulation and the California Frontier AI law is a positive step in that direction,” a spokesperson for Meta wrote in a statement.\u003c/p>\n\u003cp>Earlier this year, a coalition of more than 20 tech and youth safety advocacy organizations\u003ca href=\"https://encodeai.org/wp-content/uploads/2025/09/SB-53-Coalition-Letter-9_24_2025.pdf\"> sent a letter\u003c/a> to Gov. Newsom in support of SB 53. “If basic guardrails like this had existed at the inception of social media, our children could be living in a safer, healthier world,” the letter said.\u003c/p>\n\u003cp>“We are incredibly proud to have worked with Senator Wiener and Governor Newsom on this AI safety legislation,” wrote Sneha Revanur, founder of Encode AI, a youth-led nonprofit that pushes for responsible AI through policy. The group was one of the primary drivers behind that coalition. “Frontier AI models have immense potential but without proper oversight, they can create real risks and harms. 
California has shown it’s possible to lead on AI safety without stifling progress.”\u003c/p>\n\u003cp>The bill was opposed by business and industry representatives, including the California Chamber of Commerce, TechNet and the Silicon Valley Leadership Group.\u003c/p>\n\u003cp>“It’s vital that we strengthen California’s role as the global leader in AI and the epicenter of innovation. SVLG is committed to advocating for policies that seek to responsibly scale this transformative technology at this pivotal juncture and to unleash a new wave of innovation and growth,” Ahmad Thomas, CEO of Silicon Valley Leadership Group, wrote in a statement. “We will continue to work with the Governor and leaders in the Legislature to ensure that new laws and regulations don’t impose undue burdens on the most innovative companies in the world.”\u003c/p>\n\u003cp> \u003c/p>\n\u003cp>\u003c/p>\n\u003c/div>\u003c/p>",
"attributes": {
"named": {},
"numeric": []
}
}
],
"link": "/news/12058013/newsom-signs-california-ai-transparency-bill-tailored-to-meet-tech-industry-tastes",
"authors": [
"251"
],
"categories": [
"news_31795",
"news_28250",
"news_8",
"news_13",
"news_248"
],
"tags": [
"news_25184",
"news_32664",
"news_34755",
"news_18538",
"news_22307",
"news_32668",
"news_27626",
"news_16",
"news_34586",
"news_1631"
],
"featImg": "news_12051438",
"label": "news"
},
"news_12055125": {
"type": "posts",
"id": "news_12055125",
"meta": {
"index": "posts_1716263798",
"site": "news",
"id": "12055125",
"score": null,
"sort": [
1757358890000
]
},
"guestAuthors": [],
"slug": "sfs-anthropic-backs-california-ai-safety-bill-after-newsom-vetoed-1st-attempt",
"title": "SF’s Anthropic Backs California AI Safety Bill After Newsom Vetoed 1st Attempt",
"publishDate": 1757358890,
"format": "standard",
"headTitle": "SF’s Anthropic Backs California AI Safety Bill After Newsom Vetoed 1st Attempt | KQED",
"labelTerm": {
"site": "news"
},
"content": "\u003cp>San Francisco-based chatbot maker Anthropic said Monday that it is backing\u003ca href=\"https://leginfo.legislature.ca.gov/faces/billTextClient.xhtml?bill_id=202520260SB53\"> landmark artificial intelligence \u003c/a>legislation in California, one year after the first attempt to regulate the burgeoning industry \u003ca href=\"https://www.kqed.org/news/12007087/california-blinks-governor-newsom-vetoes-ai-bill-aimed-at-catastrophic-harms\">ended in a veto\u003c/a> by Gov. Gavin Newsom.\u003c/p>\n\u003cp>The endorsement makes \u003ca href=\"https://www.anthropic.com/\">Anthropic\u003c/a> the first large AI company to back SB 53 by Sen. Scott Wiener (D-San Francisco), which would require the biggest AI companies to disclose their safety and security protocols and report critical safety incidents to the governor’s office within 15 days. It would also offer protections to whistleblowers at AI companies of any size who call out risky or dangerous behavior.\u003c/p>\n\u003cp>Anthropic’s support is a big win for Wiener, but the bill still faces opposition from large business and tech groups. Still, the company’s support could help Wiener get the bill over the finish line — and may help convince Newsom, who, after vetoing last year’s bill, created a \u003ca href=\"https://www.cafrontieraigov.org/\">working group to develop recommendations for AI regulation\u003c/a>. 
Wiener said he crafted SB 53 to align with that group’s findings.\u003c/p>\n\u003cp>[ad fullwidth]\u003c/p>\n\u003cp>In an interview with KQED, Anthropic CEO Dario Amodei said Wiener’s bill, which was \u003ca href=\"https://subscriber.politicopro.com/article/2025/09/scott-wiener-significantly-amends-ai-transparency-bill-with-apparent-input-from-openai-others-00548813\">narrowed last week\u003c/a> to apply only to large AI programs, strikes a balance between safety and progress.\u003c/p>\n\u003cp>“SB 53 focuses on a particular subset of the risks, what we call catastrophic risks, which are some of the kinds of the largest things that can go wrong — things like large-scale cyberattacks,” he said. “It forces companies to be transparent about how they measure these risks, about the safety and security testing that they do in order to quantify these risks. This is something that Anthropic actually already does.”\u003c/p>\n\u003cfigure id=\"attachment_12039671\" class=\"wp-caption aligncenter\" style=\"max-width: 2000px\">\u003cimg loading=\"lazy\" decoding=\"async\" class=\"size-full wp-image-12039671\" src=\"https://cdn.kqed.org/wp-content/uploads/sites/10/2025/05/250218-SFDowntown-03-BL_qed.jpg\" alt=\"\" width=\"2000\" height=\"1333\" srcset=\"https://cdn.kqed.org/wp-content/uploads/sites/10/2025/05/250218-SFDowntown-03-BL_qed.jpg 2000w, https://cdn.kqed.org/wp-content/uploads/sites/10/2025/05/250218-SFDowntown-03-BL_qed-800x533.jpg 800w, https://cdn.kqed.org/wp-content/uploads/sites/10/2025/05/250218-SFDowntown-03-BL_qed-1020x680.jpg 1020w, https://cdn.kqed.org/wp-content/uploads/sites/10/2025/05/250218-SFDowntown-03-BL_qed-160x107.jpg 160w, https://cdn.kqed.org/wp-content/uploads/sites/10/2025/05/250218-SFDowntown-03-BL_qed-1536x1024.jpg 1536w, https://cdn.kqed.org/wp-content/uploads/sites/10/2025/05/250218-SFDowntown-03-BL_qed-1920x1280.jpg 1920w\" sizes=\"auto, (max-width: 2000px) 100vw, 2000px\">\u003cfigcaption class=\"wp-caption-text\">Sen. 
Scott Wiener speaks during a press conference in Union Square, San Francisco, on Feb. 18, 2025. \u003ccite>(Beth LaBerge/KQED)\u003c/cite>\u003c/figcaption>\u003c/figure>\n\u003cp>Amodei noted that when Anthropic tested its latest AI model, Claude 4, earlier this year, researchers \u003ca href=\"https://www.axios.com/2025/05/23/anthropic-ai-deception-risk\">found some concerning responses\u003c/a>, like “resisting being shut down or blackmailing employees.” That testing, he said, is being conducted even as Anthropic continues to grow.\u003c/p>\n\u003cp>“We’ve managed to do that while being a profitable and fast-growing company. And so our thinking is that if we can do that, the other large companies in the space can do that as well,” he said.\u003c/p>\n\u003cp>Congress has so far failed to enact any sort of national AI regulations. Wiener’s first attempt last year, praised as “a promising first step” by Anthropic co-founder Jack Clark, drew criticism from Newsom in his veto message over its limited focus on the largest models.[aside postID=news_12052617 hero='https://cdn.kqed.org/wp-content/uploads/sites/10/2025/08/GETTYIMAGES-2228237489-KQED.jpg']Amodei said he thinks SB 53 could go even further, but he did have concerns about how prescriptive last year’s legislation was.\u003c/p>\n\u003cp>“We saw a good idea at the heart of it, but we were concerned, actually, that with the field moving so fast, having to comply with all these precise tests would be too rigid and they would kind of quickly become out of date,” Amodei said. 
“So I would say we had mixed feelings about it.”\u003c/p>\n\u003cp>SB 53, he said, strikes a good balance — in part because it does distinguish between large and small AI companies.\u003c/p>\n\u003cp>“We don’t want to stifle competition or even just be accused of stifling competition,” he said.\u003c/p>\n\u003cp>In a written statement, Wiener praised Amodei, saying that under his leadership, “Anthropic has been a courageous and steadfast champion for innovating safely and responsibly.”\u003c/p>\n\u003cp>“It can be difficult to tell where many AI companies will come down on safety issues. Never Anthropic,” he added. “I’m grateful to have support from a homegrown San Francisco company that’s shown the world it’s possible to lead on both responsible practices and product performance. The two aren’t mutually exclusive.”\u003c/p>\n\u003cp>The legislation must pass both houses and be sent to Newsom by Friday. He will have a month to decide whether to sign or veto the bill.\u003c/p>\n\u003cp>\u003c/p>\n",
"blocks": [],
"excerpt": "The endorsement makes Anthropic the first large AI company to back the landmark artificial intelligence legislation by state Sen. Scott Wiener.",
"status": "publish",
"parent": 0,
"modified": 1757358890,
"stats": {
"hasAudio": false,
"hasVideo": false,
"hasChartOrMap": false,
"iframeSrcs": [],
"hasGoogleForm": false,
"hasGallery": false,
"hasHearkenModule": false,
"hasPolis": false,
"paragraphCount": 16,
"wordCount": 708
},
"headData": {
"title": "SF’s Anthropic Backs California AI Safety Bill After Newsom Vetoed 1st Attempt | KQED",
"description": "The endorsement makes Anthropic the first large AI company to back the landmark artificial intelligence legislation by state Sen. Scott Wiener.",
"ogTitle": "",
"ogDescription": "",
"ogImgId": "",
"twTitle": "",
"twDescription": "",
"twImgId": "",
"schema": {
"@context": "https://schema.org",
"@type": "NewsArticle",
"headline": "SF’s Anthropic Backs California AI Safety Bill After Newsom Vetoed 1st Attempt",
"datePublished": "2025-09-08T12:14:50-07:00",
"dateModified": "2025-09-08T12:14:50-07:00",
"image": "https://cdn.kqed.org/wp-content/uploads/2020/02/KQED-OG-Image@1x.png",
"isAccessibleForFree": "True",
"publisher": {
"@type": "NewsMediaOrganization",
"@id": "https://www.kqed.org/#organization",
"name": "KQED",
"logo": "https://cdn.kqed.org/wp-content/uploads/2020/02/KQED-OG-Image@1x.png",
"url": "https://www.kqed.org",
"sameAs": [
"https://www.facebook.com/KQED",
"https://twitter.com/KQED",
"https://www.instagram.com/kqed/",
"https://www.tiktok.com/@kqedofficial",
"https://www.linkedin.com/company/kqed",
"https://www.youtube.com/channel/UCeC0IOo7i1P_61zVUWbJ4nw"
]
}
}
},
"primaryCategory": {
"termId": 248,
"slug": "technology",
"name": "Technology"
},
"sticky": false,
"nprStoryId": "kqed-12055125",
"templateType": "standard",
"featuredImageType": "standard",
"excludeFromSiteSearch": "Include",
"articleAge": "0",
"path": "/news/12055125/sfs-anthropic-backs-california-ai-safety-bill-after-newsom-vetoed-1st-attempt",
"audioTrackLength": null,
"parsedContent": [
{
"type": "contentString",
"content": "\u003cdiv class=\"post-body\">\u003cp>\u003cp>San Francisco-based chatbot maker Anthropic said Monday that it is backing\u003ca href=\"https://leginfo.legislature.ca.gov/faces/billTextClient.xhtml?bill_id=202520260SB53\"> landmark artificial intelligence \u003c/a>legislation in California, one year after the first attempt to regulate the burgeoning industry \u003ca href=\"https://www.kqed.org/news/12007087/california-blinks-governor-newsom-vetoes-ai-bill-aimed-at-catastrophic-harms\">ended in a veto\u003c/a> by Gov. Gavin Newsom.\u003c/p>\n\u003cp>The endorsement makes \u003ca href=\"https://www.anthropic.com/\">Anthropic\u003c/a> the first large AI company to back SB 53 by Sen. Scott Wiener (D-San Francisco), which would require the biggest AI companies to disclose their safety and security protocols and report critical safety incidents to the governor’s office within 15 days. It would also offer protections to whistleblowers at AI companies of any size who call out risky or dangerous behavior.\u003c/p>\n\u003cp>Anthropic’s support is a big win for Wiener, but the bill still faces opposition from large business and tech groups. Still, the company’s support could help Wiener get the bill over the finish line — and may help convince Newsom, who, after vetoing last year’s bill, created a \u003ca href=\"https://www.cafrontieraigov.org/\">working group to develop recommendations for AI regulation\u003c/a>. Wiener said he crafted SB 53 to align with that group’s findings.\u003c/p>\n\u003cp>\u003c/p>\u003c/div>",
"attributes": {
"named": {},
"numeric": []
}
},
{
"type": "component",
"content": "",
"name": "ad",
"attributes": {
"named": {
"label": "fullwidth"
},
"numeric": [
"fullwidth"
]
}
},
{
"type": "contentString",
"content": "\u003cdiv class=\"post-body\">\u003cp>\u003c/p>\n\u003cp>In an interview with KQED, Anthropic CEO Dario Amodei said Wiener’s bill, which was \u003ca href=\"https://subscriber.politicopro.com/article/2025/09/scott-wiener-significantly-amends-ai-transparency-bill-with-apparent-input-from-openai-others-00548813\">narrowed last week\u003c/a> to apply only to large AI programs, strikes a balance between safety and progress.\u003c/p>\n\u003cp>“SB 53 focuses on a particular subset of the risks, what we call catastrophic risks, which are some of the kinds of the largest things that can go wrong — things like large-scale cyberattacks,” he said. “It forces companies to be transparent about how they measure these risks, about the safety and security testing that they do in order to quantify these risks. This is something that Anthropic actually already does.”\u003c/p>\n\u003cfigure id=\"attachment_12039671\" class=\"wp-caption aligncenter\" style=\"max-width: 2000px\">\u003cimg loading=\"lazy\" decoding=\"async\" class=\"size-full wp-image-12039671\" src=\"https://cdn.kqed.org/wp-content/uploads/sites/10/2025/05/250218-SFDowntown-03-BL_qed.jpg\" alt=\"\" width=\"2000\" height=\"1333\" srcset=\"https://cdn.kqed.org/wp-content/uploads/sites/10/2025/05/250218-SFDowntown-03-BL_qed.jpg 2000w, https://cdn.kqed.org/wp-content/uploads/sites/10/2025/05/250218-SFDowntown-03-BL_qed-800x533.jpg 800w, https://cdn.kqed.org/wp-content/uploads/sites/10/2025/05/250218-SFDowntown-03-BL_qed-1020x680.jpg 1020w, https://cdn.kqed.org/wp-content/uploads/sites/10/2025/05/250218-SFDowntown-03-BL_qed-160x107.jpg 160w, https://cdn.kqed.org/wp-content/uploads/sites/10/2025/05/250218-SFDowntown-03-BL_qed-1536x1024.jpg 1536w, https://cdn.kqed.org/wp-content/uploads/sites/10/2025/05/250218-SFDowntown-03-BL_qed-1920x1280.jpg 1920w\" sizes=\"auto, (max-width: 2000px) 100vw, 2000px\">\u003cfigcaption class=\"wp-caption-text\">Sen. 
Scott Wiener speaks during a press conference in Union Square, San Francisco, on Feb. 18, 2025. \u003ccite>(Beth LaBerge/KQED)\u003c/cite>\u003c/figcaption>\u003c/figure>\n\u003cp>Amodei noted that when Anthropic tested its latest AI model, Claude 4, earlier this year, researchers \u003ca href=\"https://www.axios.com/2025/05/23/anthropic-ai-deception-risk\">found some concerning responses\u003c/a>, like “resisting being shut down or blackmailing employees.” That testing, he said, is being conducted even as Anthropic continues to grow.\u003c/p>\n\u003cp>“We’ve managed to do that while being a profitable and fast-growing company. And so our thinking is that if we can do that, the other large companies in the space can do that as well,” he said.\u003c/p>\n\u003cp>Congress has so far failed to enact any sort of national AI regulations. Wiener’s first attempt last year, praised as “a promising first step” by Anthropic co-founder Jack Clark, drew criticism from Newsom in his veto message over its limited focus on the largest models.\u003c/p>\u003c/div>",
"attributes": {
"named": {},
"numeric": []
}
},
{
"type": "component",
"content": "",
"name": "aside",
"attributes": {
"named": {
"postid": "news_12052617",
"hero": "https://cdn.kqed.org/wp-content/uploads/sites/10/2025/08/GETTYIMAGES-2228237489-KQED.jpg",
"label": ""
},
"numeric": []
}
},
{
"type": "contentString",
"content": "\u003cdiv class=\"post-body\">\u003cp>Amodei said he thinks SB 53 could go even further, but he did have concerns about how prescriptive last year’s legislation was.\u003c/p>\n\u003cp>“We saw a good idea at the heart of it, but we were concerned, actually, that with the field moving so fast, having to comply with all these precise tests would be too rigid and they would kind of quickly become out of date,” Amodei said. “So I would say we had mixed feelings about it.”\u003c/p>\n\u003cp>SB 53, he said, strikes a good balance — in part because it does distinguish between large and small AI companies.\u003c/p>\n\u003cp>“We don’t want to stifle competition or even just be accused of stifling competition,” he said.\u003c/p>\n\u003cp>In a written statement, Wiener praised Amodei, saying that under his leadership, “Anthropic has been a courageous and steadfast champion for innovating safely and responsibly.”\u003c/p>\n\u003cp>“It can be difficult to tell where many AI companies will come down on safety issues. Never Anthropic,” he added. “I’m grateful to have support from a homegrown San Francisco company that’s shown the world it’s possible to lead on both responsible practices and product performance. The two aren’t mutually exclusive.”\u003c/p>\n\u003cp>The legislation must pass both houses and be sent to Newsom by Friday. He will have a month to decide whether to sign or veto the bill.\u003c/p>\n\u003cp>\u003c/p>\n\u003c/div>\u003c/p>",
"attributes": {
"named": {},
"numeric": []
}
}
],
"link": "/news/12055125/sfs-anthropic-backs-california-ai-safety-bill-after-newsom-vetoed-1st-attempt",
"authors": [
"3239"
],
"categories": [
"news_31795",
"news_8",
"news_13",
"news_248"
],
"tags": [
"news_25184",
"news_32664",
"news_34755",
"news_34377",
"news_35758",
"news_17968",
"news_34586",
"news_1631"
],
"featImg": "news_12055158",
"label": "news"
},
"news_12054417": {
"type": "posts",
"id": "news_12054417",
"meta": {
"index": "posts_1716263798",
"site": "news",
"id": "12054417",
"score": null,
"sort": [
1756893638000
]
},
"guestAuthors": [],
"slug": "we-have-to-and-are-proud-to-big-tech-embraces-the-u-s-military",
"title": "‘We Have To, and Are Proud To’: Silicon Valley Embraces the U.S. Military",
"publishDate": 1756893638,
"format": "audio",
"headTitle": "‘We Have To, and Are Proud To’: Silicon Valley Embraces the U.S. Military | KQED",
"labelTerm": {},
"content": "\u003cp>A decade ago, most major tech companies swore off working with the U.S. military. Google, Meta and OpenAI even once had policies banning the use of AI in weapons.\u003c/p>\n\u003cp>But times have changed, and now Silicon Valley is fully embracing contracts and collaborations with the military. Sheera Frenkel, tech reporter with the New York Times, explains how and why this shift occurred.\u003c/p>\n\u003cp>\u003cem>\u003ci>Some members of the KQED podcast team are represented by The Screen Actors Guild, American Federation of Television and Radio Artists. San Francisco Northern California Local.\u003c/i>\u003c/em>\u003c/p>\n\u003cp>\u003cstrong>Links:\u003c/strong>\u003c/p>\n\u003cul>\n\u003cli>\u003ca href=\"https://www.nytimes.com/2025/08/04/technology/google-meta-openai-military-war.html\">The Militarization of Silicon Valley\u003c/a>\u003c/li>\n\u003c/ul>\n\u003cp>\u003c!-- iframe plugin v.4.3 wordpress.org/plugins/iframe/ -->\u003cbr>\n\u003ciframe loading=\"lazy\" frameborder=\"0\" height=\"200\" scrolling=\"no\" src=\"https://playlist.megaphone.fm?e=KQINC5559995627&light=true\" width=\"100%\" class=\"iframe-class\">\u003c/iframe>\u003c/p>\n\u003cp>[ad fullwidth]\u003c/p>\n\u003cp>\u003cem>This is a computer-generated transcript. While our team has reviewed it, there may be errors.\u003c/em>\u003c/p>\n\n\u003cp>\u003cstrong>Jessica Kariisa \u003c/strong>[00:00:00] I’m Jessica Kariisa, and welcome to The Bay, local news to keep you rooted.\u003c/p>\n\u003cp>\u003cstrong>Ambi \u003c/strong>[00:00:05] Ladies and gentlemen, welcome to the Army Jacket Ceremony and the Commissioning Ceremony for Detachment 201.\u003c/p>\n\u003cp>\u003cstrong>Jessica Kariisa \u003c/strong>[00:00:13] In June of this year, four current and former executives from Meta, OpenAI, and Palantir took center stage at a ceremony at the Joint Base Meyer Henderson Hall in Arlington, Virginia. 
Wearing combat gear and boots, the executives were there for their swearing-in ceremony as Lieutenant Colonels in Detachment 201. A new unit to advise the Army on new technology for use in combat.\u003c/p>\n\u003cp>\u003cstrong>Ambi \u003c/strong>[00:00:45] In an era defined by information warfare, automation, and digital disruption, the army needs skilled technologists in its ranks now more than ever.\u003c/p>\n\u003cp>\u003cstrong>Jessica Kariisa \u003c/strong>[00:00:58] Big tech has embraced the U.S. Military. It’s a dramatic shift from just a decade ago when most of Silicon Valley was firmly against helping the government wage war. These days, tech executives are singing a different tune.\u003c/p>\n\u003cp>\u003cstrong>Sheera Frenkel \u003c/strong>[00:01:14] You’re seeing a lot of posting about how great America is and how proud they are to be Americans doing business in America. That’s a shift and it’s really noticeable among the top executives.\u003c/p>\n\u003cp>\u003cstrong>Jessica Kariisa \u003c/strong>[00:01:30] Today, Sheera Frenkel from The New York Times talks with The Bay’s host, Ericka Cruz Guevarra, about how Silicon Valley changed its mind on working with the military.\u003c/p>\n\u003cp>\u003cstrong>Ericka Cruz Guevarra \u003c/strong>[00:01:51] Sheera, I guess how might you describe how tight Silicon Valley and the U.S. Government and U. S. Military in particular are these days?\u003c/p>\n\u003cp>\u003cstrong>Sheera Frenkel \u003c/strong>[00:02:01] We are in a moment of exceptional closeness between the U.S. government and Silicon Valley, and that is really unusual. Silicon Valley had its origins with funding from the U.S. Government. But until now, there has not been this kind of widespread across the board move of Silicon Valley, you know, big companies, executives working closely with the U S military and having the kind of technology that’s actually useful for them. 
This is a region that saw itself as liberal, progressive, independent, connecting the world. That was a big motto. This idea that it was really international and it was about the good of all humankind, and not something that was specifically wedded to kind of an American patriotism. There’ve been figures, there’ve been characters, there’s been companies that have been public about their want and their need to work with the U.S. Government, but as much as a decade ago, there was widespread protests across Silicon Valley by the employee base at the idea of working closely with the government.\u003c/p>\n\u003cp>\u003cstrong>Ericka Cruz Guevarra \u003c/strong>[00:03:12] Yeah, don’t be evil, right, as Google used to say. And I’m thinking, you mentioned the protests, I’m thinking back to 2018 and Google when there were these mass protests by employees there around Google’s involvement in a Pentagon program, right? Can you just remind me of that era of Google, of this like don’t-be-evil sort of motto?\u003c/p>\n\u003cp>\u003cstrong>Sheera Frenkel \u003c/strong>[00:03:38] That was an era where people came to work at Google, they would graduate from the top universities in the United States. And as people in their early 20s, they saw it as this just really sort of do good, do positive things for the world kind of company. And executives fed into it, this idea of it’s bottom-up kind of culture and we listen to every employee and if you guys protest, we want to hear about it.\u003c/p>\n\u003cp>\u003cstrong>News clip \u003c/strong>[00:04:03] A letter to Google CEO Sundar Pichai is signed by more than 3,000 Google workers. Here’s what it says, quote, we believe Google should not be in the business of war, therefore we ask that Project Maven be canceled and that Google draft publicize.\u003c/p>\n\u003cp>\u003cstrong>Sheera Frenkel \u003c/strong>[00:04:18] And so when Google employees came out en masse and said they did not want executives to pursue a contract with the U.S. 
Government with the Pentagon, executives listened and they backed down. And you saw employees at smaller companies across Silicon Valley taking note.\u003c/p>\n\u003cp>\u003cstrong>Ericka Cruz Guevarra \u003c/strong>[00:04:31] And I remember the protests not just being effective in stopping the collaboration with this program but it literally became policy at Google to not pursue contracts with the US Military right?\u003c/p>\n\u003cp>\u003cstrong>Sheera Frenkel \u003c/strong>[00:04:50] Three of the the biggest companies, Meta, OpenAI, and Google, all changed their terms of service so that they would not work with the U.S. Government and that specifically their AI technology wouldn’t be used to help build defense systems. It was literally, we’re going to create policy so that our systems can’t be used for defense or for military purposes. That’s how strongly these executives doubled down on what their employees were asking for.\u003c/p>\n\u003cp>\u003cstrong>Ericka Cruz Guevarra \u003c/strong>[00:05:16] Around this time, Sheera, is it fair to say that everyone in tech was pretty much against military contracts?\u003c/p>\n\u003cp>\u003cstrong>Sheera Frenkel \u003c/strong>[00:05:24] I wouldn’t say everyone because you had outliers. You had companies like Palantir, who was very outspoken about their work with the US government. They, in fact, sued the army to get a contract because they were so keen on being a tech company that was very out, very public, very aggressive about wanting to be a tech companies that worked with the U.S. Military.\u003c/p>\n\u003cp>\u003cstrong>Alex Karp \u003c/strong>[00:05:47] And while there, you had the idea for Palantir? 
Yeah, well, you know, post 9-11, I think the idea, again, it was Silicon Valley ought to be involved in fighting terrorism and protecting our civil liberties.\u003c/p>\n\u003cp>\u003cstrong>Sheera Frenkel \u003c/strong>[00:05:59] Alex Karp, the CEO of Palantir, talks about the importance of working with the government all the time.\u003c/p>\n\u003cp>\u003cstrong>Alex Karp \u003c/strong>[00:06:05] We are kind of the greatest democracy in the world, and we tend to win wars where the people believe in what they’re doing. Where the people think that there’s a trade-off between civil liberties and fighting cyber terrorists, it’s going to be very hard to win.\u003c/p>\n\u003cp>\u003cstrong>Sheera Frenkel \u003c/strong>[00:06:17] I just remember how clear it was that they were outliers at that time to what the rest of kind of the Silicon Valley companies were feeling and doing and saying.\u003c/p>\n\u003cp>\u003cstrong>Ericka Cruz Guevarra \u003c/strong>[00:06:31] And for folks who maybe aren’t as familiar with Palantir, what do they do?\u003c/p>\n\u003cp>\u003cstrong>Sheera Frenkel \u003c/strong>[00:06:35] Palantire is a funny company in that they had a certain mysterious aura around them for a long time, and I think they encouraged that by not saying much about what they did. They build systems. They build data systems that can analyze data, that can process it, that can draw conclusions. For instance, they work across the U.S. Federal government, and they’ll come into a place and say, right, here is all the data you sit on. We are not just going to organize it for you, we’re going to make it easy for you visualize it, to analyze it, our AI will draw conclusions. 
So for a long time, they were used by police departments, for instance, or they were used by different intelligence services to help look at their own data and sort of be able to understand it, even if you were not necessarily a technically minded person.\u003c/p>\n\u003cp>\u003cstrong>Ericka Cruz Guevarra \u003c/strong>[00:07:23] I guess we’re talking now because, as you’re just talking about, Palantir was sort of this outlier among tech companies, really among one of the only ones really working closely with the U.S. Military, but increasingly they’re someone that other tech companies are becoming more and more jealous of these days, it seems like.\u003c/p>\n\u003cp>\u003cstrong>Sheera Frenkel \u003c/strong>[00:07:44] Yeah, it’s really interesting. It’s come full circle. All these tech companies that, you know, stepped away from the US government are now looking at Palantir’s incredibly lucrative contracts across the US Government. Each one of these contracts can be worth hundreds of millions of dollars. And once you are working with the US government, they’re pretty faithful as clients. So you’re looking at these contracts that are going to give you amazing revenue year after year. And they want to work with American companies. They seek out American companies. And so I’ve heard some pretty senior executives at Meta and at Google say quite plainly, like, we’re jealous. We wish we were in there sooner.\u003c/p>\n\u003cp>\u003cstrong>Ericka Cruz Guevarra \u003c/strong>[00:08:26] What exactly has changed here? Like, how did a company like Google go from don’t be evil to now attempting, it looks like, to pursue contracts with the US military? Like, what is this change?\u003c/p>\n\u003cp>\u003cstrong>Sheera Frenkel \u003c/strong>[00:08:39] I think an executive at Google would say, well, we’ve rethought what it looks like to be evil. A couple things have happened in the last five years or so that have shifted their view. 
I think primarily the war in Ukraine, seeing the way that Russia and Ukraine have been fighting that war has really mobilized a lot of American executives into thinking that the US Army is not ready to fight the kind of wars that get fought now. Tanks and fighter jets and all that are always going to be part of the U.S. Military. But the way that drone warfare has shifted things, the way the AI systems have shifted both the way militaries collect intelligence and choose targets and select how to act, all of that is not possible without the kind of technical companies and expertise you have in Silicon Valley. And so there’s this sense of like, oh, well, if America goes to war and we’re they’re helping, we may not win. We also have seen a really radically shifting political climate in Silicon Valley. More and more executives have openly expressed support of Donald Trump and his administration. You hear a lot of people out here being like, well, I may not agree with everything that Trump does, but he’s good for business and he’s good for this. And you hear that kind of thing more and more. And so you have a certain willingness of executives to kind of come out and say, I want to work with Trump. I think it’s positive for me and my company to work with him.\u003c/p>\n\u003cp>\u003cstrong>Ericka Cruz Guevarra \u003c/strong>[00:10:25] I also have to imagine that money plays a big role here. You mentioned how many of these military contracts have a pretty big price tag on them. I mean, what role do you think that plays? And I know the president too has pledged to spend a lot more on the military.\u003c/p>\n\u003cp>\u003cstrong>Sheera Frenkel \u003c/strong>[00:10:45] Trump wants to put into place budgets that are going to see a lot of money flowing to the kind of new technology that Silicon Valley can produce. And so if you’re an executive out here, and not to name names, but you’ve decided to rename your company Meta because you think the Metaverse is the future. 
And then people are kind of like, well, I don’t know if I want to live in the Metaverse. I’m not sure that I want AR and VR goggles. And then the US military comes around and they’re like, Well, we’ll buy half a billion dollars worth of VR goggles because we want to train our soldiers on how to fight in war by putting them through battle scenarios. And suddenly, suddenly there’s a reason to name your company Meta. Suddenly there’s an actual client that wants to buy all that. And so it makes a lot of business sense for these companies to be in this way, and finding military applications for the technology they’ve been working on.\u003c/p>\n\u003cp>\u003cstrong>Ericka Cruz Guevarra \u003c/strong>[00:11:37] Yeah, you just mentioned Meta and these AR VR goggles. I mean, what are some examples, I guess, of this shift that is happening in Silicon Valley? And I guess what specifically to our tech executives saying?\u003c/p>\n\u003cp>\u003cstrong>Sheera Frenkel \u003c/strong>[00:11:53] You hear a lot of pride among tech executives that they’re working this closely with the U.S. Government, I like to look at their Instagram or their threads or their X pages because you can tell a lot by what they post. And if you look at them over the last, I’d say, year or so, you’re seeing a lot of like American flags flying in the background of posts. You’re seeing lot of posting about how great America is and how proud they are to be Americans doing business in America.\u003c/p>\n\u003cp>\u003cstrong>Sam Altman \u003c/strong>[00:12:22] Of course, we have to and are proud to and really want to engage in national security areas.\u003c/p>\n\u003cp>\u003cstrong>Sheera Frenkel \u003c/strong>[00:12:29] Sam Altman, the CEO of OpenAI, has started talking about the importance of working with the U.S. 
Government just in the last year.\u003c/p>\n\u003cp>\u003cstrong>Sam Altman \u003c/strong>[00:12:36] Part of AI to benefit all of humanity very clearly involves supporting the US and our allies to uphold democratic values around the world and to keep us safe. And this is like an integral part of our mission. This is not some side quest that maybe we think about at some point.\u003c/p>\n\u003cp>\u003cstrong>Sheera Frenkel \u003c/strong>[00:12:55] That’s a shift, and it’s really noticeable among the top executives. That’s something you’re really seeing at the top, and I think there is a gulf here between what executives are saying and posting and feeling about all this, and what the workforce is feeling about the direction that their companies are taking. You’ve also seen a lot of contracts signed. You’ve seen companies like OpenAI partnering with Andrel to use their AI technology to create weapons of the future. The question now isn’t whether the US is going to have autonomous weapons. It’s when will the US have autonomous weapons, and how quickly will companies like Google, or OpenAI, or Microsoft be able to use and pivot their AI technology to create these weapons.\u003c/p>\n\u003cp>\u003cstrong>Ericka Cruz Guevarra \u003c/strong>[00:13:46] I mean, this is making me think about Google back in 2018, as we were talking about earlier, and the role that the employees at these companies played in pushing back against this working with the US military. Are we seeing that same kind of pushback by tech employees in Silicon Valley now?\u003c/p>\n\u003cp>\u003cstrong>Sheera Frenkel \u003c/strong>[00:14:08] We are not seeing the kind of loud public pushback that we saw a little less than a decade ago. I spoke to quite a few engineers and employees at tech companies that are working with the U.S. Government who are worried. They’re sitting there and going, well, I joined this company because I believed in the ethos of connecting the world or do no evil. 
And now, I don’t know, I might be building an AI system that helps choose bombing targets faster for some future war, in which were you know, launching aerial strikes. I just think there’s this interesting moment where a lot of these people are asking themselves, do I feel good about the work I’m doing? But they’re doing it quietly, to be clear, because the last few years have seen a lot of layoffs across the big companies. And a lot of these people are worried for their jobs.\u003c/p>\n\u003cp>\u003cstrong>Ericka Cruz Guevarra \u003c/strong>[00:14:57] And we’ve seen that over the issue of Israel and Palestine, for example, at some of these tech companies, right? That there is real pushback happening now from the top.\u003c/p>\n\u003cp>\u003cstrong>Sheera Frenkel \u003c/strong>[00:15:08] Very much so. And a couple of the employees I spoke to looked specifically at Gaza as an example of a very AI driven war. I’ve written about this a lot about the systems that Israel built to be able to choose more targets to strike, to be to analyze intelligence quickly, to, you know, the facial recognition software that they’re deploying to use across Gaza. All of this are the kinds of systems that America is thinking about building. And you’re an employee, you’re looking at and you’re saying, is that the future of war?\u003c/p>\n\u003cp>\u003cstrong>Ericka Cruz Guevarra \u003c/strong>[00:15:40] I mean, Sheera, there’s obviously this moral opposition here. But I mean are there any other reasons why this collaboration between Silicon Valley and the US military is a maybe concerning trend? I mean I’m thinking about this technology and its use for surveillance in the US potentially even. I mean what are the other concerns around this?\u003c/p>\n\u003cp>\u003cstrong>Sheera Frenkel \u003c/strong>[00:16:08] I think the concerns are that you can’t put the genie back in the bottle. 
Technology can introduce different levels of surveillance that the US government can then choose to use as it wants to, right? And so there’s questions of how much more of a surveillance state does the US become. There are questions of, again, autonomous weapons. And every soldier I’ve met has talked about how the introduction of autonomous weapons removes one layer of humanity in war and that when it is robots firing at robots, it’s a very different war. And so there are people out there that are asking these questions of, do we want all these autonomous systems? What does that mean? Are we just making killing easier in the next conflict? And so, yes, anytime a technology is introduced, I think there’s a rush to kind of embrace that new technology. And then often a little like a beat later, like some would say a moment too late, there’s the question of, is this good?\u003c/p>\n\u003cp>[ad floatright]\u003c/p>\n\u003cp>\u003cstrong>Ericka Cruz Guevarra \u003c/strong>[00:17:14] Well, Sheera, thank you so much for sharing your reporting with us.\u003c/p>\n\n",
"blocks": [],
"excerpt": "Silicon Valley is fully embracing contracts and collaborations with the military. Sheera Frenkel, tech reporter with the New York Times, explains how and why this shift occurred.",
"status": "publish",
"parent": 0,
"modified": 1756928398,
"stats": {
"hasAudio": true,
"hasVideo": false,
"hasChartOrMap": false,
"iframeSrcs": [],
"hasGoogleForm": false,
"hasGallery": false,
"hasHearkenModule": false,
"hasPolis": false,
"paragraphCount": 50,
"wordCount": 3287
},
"headData": {
"title": "‘We Have To, and Are Proud To’: Silicon Valley Embraces the U.S. Military | KQED",
"description": "Silicon Valley is fully embracing contracts and collaborations with the military. Sheera Frenkel, tech reporter with the New York Times, explains how and why this shift occurred.",
"ogTitle": "",
"ogDescription": "",
"ogImgId": "",
"twTitle": "",
"twDescription": "",
"twImgId": "",
"schema": {
"@context": "https://schema.org",
"@type": "NewsArticle",
"headline": "‘We Have To, and Are Proud To’: Silicon Valley Embraces the U.S. Military",
"datePublished": "2025-09-03T03:00:38-07:00",
"dateModified": "2025-09-03T12:39:58-07:00",
"image": "https://cdn.kqed.org/wp-content/uploads/2020/02/KQED-OG-Image@1x.png",
"isAccessibleForFree": "True",
"publisher": {
"@type": "NewsMediaOrganization",
"@id": "https://www.kqed.org/#organization",
"name": "KQED",
"logo": "https://cdn.kqed.org/wp-content/uploads/2020/02/KQED-OG-Image@1x.png",
"url": "https://www.kqed.org",
"sameAs": [
"https://www.facebook.com/KQED",
"https://twitter.com/KQED",
"https://www.instagram.com/kqed/",
"https://www.tiktok.com/@kqedofficial",
"https://www.linkedin.com/company/kqed",
"https://www.youtube.com/channel/UCeC0IOo7i1P_61zVUWbJ4nw"
]
}
}
},
"primaryCategory": {
"termId": 8,
"slug": "news",
"name": "News"
},
"source": "The Bay",
"sourceUrl": "https://www.kqed.org/podcasts/thebay",
"audioUrl": "https://www.podtrac.com/pts/redirect.mp3/chrt.fm/track/G6C7C3/traffic.megaphone.fm/KQINC5559995627.mp3",
"sticky": false,
"nprStoryId": "kqed-12054417",
"templateType": "standard",
"featuredImageType": "standard",
"excludeFromSiteSearch": "Include",
"articleAge": "0",
"path": "/news/12054417/we-have-to-and-are-proud-to-big-tech-embraces-the-u-s-military",
"audioTrackLength": null,
"parsedContent": [
{
"type": "contentString",
"content": "\u003cdiv class=\"post-body\">\u003cp>\u003cp>A decade ago, most major tech companies swore off working with the U.S. military. Google, Meta and OpenAI even once had policies banning the use of AI in weapons.\u003c/p>\n\u003cp>But times have changed, and now Silicon Valley is fully embracing contracts and collaborations with the military. Sheera Frenkel, tech reporter with the New York Times, explains how and why this shift occurred.\u003c/p>\n\u003cp>\u003cem>\u003ci>Some members of the KQED podcast team are represented by The Screen Actors Guild, American Federation of Television and Radio Artists. San Francisco Northern California Local.\u003c/i>\u003c/em>\u003c/p>\n\u003cp>\u003cstrong>Links:\u003c/strong>\u003c/p>\n\u003cul>\n\u003cli>\u003ca href=\"https://www.nytimes.com/2025/08/04/technology/google-meta-openai-military-war.html\">The Militarization of Silicon Valley\u003c/a>\u003c/li>\n\u003c/ul>\n\u003cp>\u003c!-- iframe plugin v.4.3 wordpress.org/plugins/iframe/ -->\u003cbr>\n\u003ciframe loading=\"lazy\" frameborder=\"0\" height=\"200\" scrolling=\"no\" src=\"https://playlist.megaphone.fm?e=KQINC5559995627&light=true\" width=\"100%\" class=\"iframe-class\">\u003c/iframe>\u003c/p>\n\u003cp>\u003c/p>\u003c/div>",
"attributes": {
"named": {},
"numeric": []
}
},
{
"type": "component",
"content": "",
"name": "ad",
"attributes": {
"named": {
"label": "fullwidth"
},
"numeric": [
"fullwidth"
]
}
},
{
"type": "contentString",
"content": "\u003cdiv class=\"post-body\">\u003cp>\u003c/p>\n\u003cp>\u003cem>This is a computer-generated transcript. While our team has reviewed it, there may be errors.\u003c/em>\u003c/p>\n\n\u003cp>\u003cstrong>Jessica Kariisa \u003c/strong>[00:00:00] I’m Jessica Kariisa, and welcome to The Bay, local news to keep you rooted.\u003c/p>\n\u003cp>\u003cstrong>Ambi \u003c/strong>[00:00:05] Ladies and gentlemen, welcome to the Army Jacket Ceremony and the Commissioning Ceremony for Detachment 201.\u003c/p>\n\u003cp>\u003cstrong>Jessica Kariisa \u003c/strong>[00:00:13] In June of this year, four current and former executives from Meta, OpenAI, and Palantir took center stage at a ceremony at the Joint Base Meyer Henderson Hall in Arlington, Virginia. Wearing combat gear and boots, the executives were there for their swearing-in ceremony as Lieutenant Colonels in Detachment 201. A new unit to advise the Army on new technology for use in combat.\u003c/p>\n\u003cp>\u003cstrong>Ambi \u003c/strong>[00:00:45] In an era defined by information warfare, automation, and digital disruption, the army needs skilled technologists in its ranks now more than ever.\u003c/p>\n\u003cp>\u003cstrong>Jessica Kariisa \u003c/strong>[00:00:58] Big tech has embraced the U.S. Military. It’s a dramatic shift from just a decade ago when most of Silicon Valley was firmly against helping the government wage war. These days, tech executives are singing a different tune.\u003c/p>\n\u003cp>\u003cstrong>Sheera Frenkel \u003c/strong>[00:01:14] You’re seeing a lot of posting about how great America is and how proud they are to be Americans doing business in America. 
That’s a shift and it’s really noticeable among the top executives.\u003c/p>\n\u003cp>\u003cstrong>Jessica Kariisa \u003c/strong>[00:01:30] Today, Sheera Frenkel from The New York Times talks with The Bay’s host, Ericka Cruz Guevarra, about how Silicon Valley changed its mind on working with the military.\u003c/p>\n\u003cp>\u003cstrong>Ericka Cruz Guevarra \u003c/strong>[00:01:51] Sheera, I guess how might you describe how tight Silicon Valley and the U.S. Government and U. S. Military in particular are these days?\u003c/p>\n\u003cp>\u003cstrong>Sheera Frenkel \u003c/strong>[00:02:01] We are in a moment of exceptional closeness between the U.S. government and Silicon Valley, and that is really unusual. Silicon Valley had its origins with funding from the U.S. Government. But until now, there has not been this kind of widespread across the board move of Silicon Valley, you know, big companies, executives working closely with the U S military and having the kind of technology that’s actually useful for them. This is a region that saw itself as liberal, progressive, independent, connecting the world. That was a big motto. This idea that it was really international and it was about the good of all humankind, and not something that was specifically wedded to kind of an American patriotism. There’ve been figures, there’ve been characters, there’s been companies that have been public about their want and their need to work with the U.S. Government, but as much as a decade ago, there was widespread protests across Silicon Valley by the employee base at the idea of working closely with the government.\u003c/p>\n\u003cp>\u003cstrong>Ericka Cruz Guevarra \u003c/strong>[00:03:12] Yeah, don’t be evil, right, as Google used to say. And I’m thinking, you mentioned the protests, I’m thinking back to 2018 and Google when there were these mass protests by employees there around Google’s involvement in a Pentagon program, right? 
Can you just remind me of that era of Google, of this like don’t-be-evil sort of motto?\u003c/p>\n\u003cp>\u003cstrong>Sheera Frenkel \u003c/strong>[00:03:38] That was an era where people came to work at Google, they would graduate from the top universities in the United States. And as people in their early 20s, they saw it as this just really sort of do good, do positive things for the world kind of company. And executives fed into it, this idea of it’s bottom-up kind of culture and we listen to every employee and if you guys protest, we want to hear about it.\u003c/p>\n\u003cp>\u003cstrong>News clip \u003c/strong>[00:04:03] A letter to Google CEO Sundar Pichai is signed by more than 3,000 Google workers. Here’s what it says, quote, we believe Google should not be in the business of war, therefore we ask that Project Maven be canceled and that Google draft publicize.\u003c/p>\n\u003cp>\u003cstrong>Sheera Frenkel \u003c/strong>[00:04:18] And so when Google employees came out en masse and said they did not want executives to pursue a contract with the U.S. Government with the Pentagon, executives listened and they backed down. And you saw employees at smaller companies across Silicon Valley taking note.\u003c/p>\n\u003cp>\u003cstrong>Ericka Cruz Guevarra \u003c/strong>[00:04:31] And I remember the protests not just being effective in stopping the collaboration with this program but it literally became policy at Google to not pursue contracts with the US Military right?\u003c/p>\n\u003cp>\u003cstrong>Sheera Frenkel \u003c/strong>[00:04:50] Three of the the biggest companies, Meta, OpenAI, and Google, all changed their terms of service so that they would not work with the U.S. Government and that specifically their AI technology wouldn’t be used to help build defense systems. It was literally, we’re going to create policy so that our systems can’t be used for defense or for military purposes. 
That’s how strongly these executives doubled down on what their employees were asking for.\u003c/p>\n\u003cp>\u003cstrong>Ericka Cruz Guevarra \u003c/strong>[00:05:16] Around this time, Sheera, is it fair to say that everyone in tech was pretty much against military contracts?\u003c/p>\n\u003cp>\u003cstrong>Sheera Frenkel \u003c/strong>[00:05:24] I wouldn’t say everyone because you had outliers. You had companies like Palantir, who was very outspoken about their work with the US government. They, in fact, sued the army to get a contract because they were so keen on being a tech company that was very out, very public, very aggressive about wanting to be a tech companies that worked with the U.S. Military.\u003c/p>\n\u003cp>\u003cstrong>Alex Karp \u003c/strong>[00:05:47] And while there, you had the idea for Palantir? Yeah, well, you know, post 9-11, I think the idea, again, it was Silicon Valley ought to be involved in fighting terrorism and protecting our civil liberties.\u003c/p>\n\u003cp>\u003cstrong>Sheera Frenkel \u003c/strong>[00:05:59] Alex Karp, the CEO of Palantir, talks about the importance of working with the government all the time.\u003c/p>\n\u003cp>\u003cstrong>Alex Karp \u003c/strong>[00:06:05] We are kind of the greatest democracy in the world, and we tend to win wars where the people believe in what they’re doing. 
Where the people think that there’s a trade-off between civil liberties and fighting cyber terrorists, it’s going to be very hard to win.\u003c/p>\n\u003cp>\u003cstrong>Sheera Frenkel \u003c/strong>[00:06:17] I just remember how clear it was that they were outliers at that time to what the rest of kind of the Silicon Valley companies were feeling and doing and saying.\u003c/p>\n\u003cp>\u003cstrong>Ericka Cruz Guevarra \u003c/strong>[00:06:31] And for folks who maybe aren’t as familiar with Palantir, what do they do?\u003c/p>\n\u003cp>\u003cstrong>Sheera Frenkel \u003c/strong>[00:06:35] Palantire is a funny company in that they had a certain mysterious aura around them for a long time, and I think they encouraged that by not saying much about what they did. They build systems. They build data systems that can analyze data, that can process it, that can draw conclusions. For instance, they work across the U.S. Federal government, and they’ll come into a place and say, right, here is all the data you sit on. We are not just going to organize it for you, we’re going to make it easy for you visualize it, to analyze it, our AI will draw conclusions. So for a long time, they were used by police departments, for instance, or they were used by different intelligence services to help look at their own data and sort of be able to understand it, even if you were not necessarily a technically minded person.\u003c/p>\n\u003cp>\u003cstrong>Ericka Cruz Guevarra \u003c/strong>[00:07:23] I guess we’re talking now because, as you’re just talking about, Palantir was sort of this outlier among tech companies, really among one of the only ones really working closely with the U.S. Military, but increasingly they’re someone that other tech companies are becoming more and more jealous of these days, it seems like.\u003c/p>\n\u003cp>\u003cstrong>Sheera Frenkel \u003c/strong>[00:07:44] Yeah, it’s really interesting. It’s come full circle. 
All these tech companies that, you know, stepped away from the US government are now looking at Palantir’s incredibly lucrative contracts across the US Government. Each one of these contracts can be worth hundreds of millions of dollars. And once you are working with the US government, they’re pretty faithful as clients. So you’re looking at these contracts that are going to give you amazing revenue year after year. And they want to work with American companies. They seek out American companies. And so I’ve heard some pretty senior executives at Meta and at Google say quite plainly, like, we’re jealous. We wish we were in there sooner.\u003c/p>\n\u003cp>\u003cstrong>Ericka Cruz Guevarra \u003c/strong>[00:08:26] What exactly has changed here? Like, how did a company like Google go from don’t be evil to now attempting, it looks like, to pursue contracts with the US military? Like, what is this change?\u003c/p>\n\u003cp>\u003cstrong>Sheera Frenkel \u003c/strong>[00:08:39] I think an executive at Google would say, well, we’ve rethought what it looks like to be evil. A couple things have happened in the last five years or so that have shifted their view. I think primarily the war in Ukraine, seeing the way that Russia and Ukraine have been fighting that war has really mobilized a lot of American executives into thinking that the US Army is not ready to fight the kind of wars that get fought now. Tanks and fighter jets and all that are always going to be part of the U.S. Military. But the way that drone warfare has shifted things, the way the AI systems have shifted both the way militaries collect intelligence and choose targets and select how to act, all of that is not possible without the kind of technical companies and expertise you have in Silicon Valley. And so there’s this sense of like, oh, well, if America goes to war and we’re they’re helping, we may not win. We also have seen a really radically shifting political climate in Silicon Valley. 
More and more executives have openly expressed support of Donald Trump and his administration. You hear a lot of people out here being like, well, I may not agree with everything that Trump does, but he’s good for business and he’s good for this. And you hear that kind of thing more and more. And so you have a certain willingness of executives to kind of come out and say, I want to work with Trump. I think it’s positive for me and my company to work with him.\u003c/p>\n\u003cp>\u003cstrong>Ericka Cruz Guevarra \u003c/strong>[00:10:25] I also have to imagine that money plays a big role here. You mentioned how many of these military contracts have a pretty big price tag on them. I mean, what role do you think that plays? And I know the president too has pledged to spend a lot more on the military.\u003c/p>\n\u003cp>\u003cstrong>Sheera Frenkel \u003c/strong>[00:10:45] Trump wants to put into place budgets that are going to see a lot of money flowing to the kind of new technology that Silicon Valley can produce. And so if you’re an executive out here, and not to name names, but you’ve decided to rename your company Meta because you think the Metaverse is the future. And then people are kind of like, well, I don’t know if I want to live in the Metaverse. I’m not sure that I want AR and VR goggles. And then the US military comes around and they’re like, Well, we’ll buy half a billion dollars worth of VR goggles because we want to train our soldiers on how to fight in war by putting them through battle scenarios. And suddenly, suddenly there’s a reason to name your company Meta. Suddenly there’s an actual client that wants to buy all that. And so it makes a lot of business sense for these companies to be in this way, and finding military applications for the technology they’ve been working on.\u003c/p>\n\u003cp>\u003cstrong>Ericka Cruz Guevarra \u003c/strong>[00:11:37] Yeah, you just mentioned Meta and these AR VR goggles. 
I mean, what are some examples, I guess, of this shift that is happening in Silicon Valley? And I guess what specifically to our tech executives saying?\u003c/p>\n\u003cp>\u003cstrong>Sheera Frenkel \u003c/strong>[00:11:53] You hear a lot of pride among tech executives that they’re working this closely with the U.S. Government, I like to look at their Instagram or their threads or their X pages because you can tell a lot by what they post. And if you look at them over the last, I’d say, year or so, you’re seeing a lot of like American flags flying in the background of posts. You’re seeing lot of posting about how great America is and how proud they are to be Americans doing business in America.\u003c/p>\n\u003cp>\u003cstrong>Sam Altman \u003c/strong>[00:12:22] Of course, we have to and are proud to and really want to engage in national security areas.\u003c/p>\n\u003cp>\u003cstrong>Sheera Frenkel \u003c/strong>[00:12:29] Sam Altman, the CEO of OpenAI, has started talking about the importance of working with the U.S. Government just in the last year.\u003c/p>\n\u003cp>\u003cstrong>Sam Altman \u003c/strong>[00:12:36] Part of AI to benefit all of humanity very clearly involves supporting the US and our allies to uphold democratic values around the world and to keep us safe. And this is like an integral part of our mission. This is not some side quest that maybe we think about at some point.\u003c/p>\n\u003cp>\u003cstrong>Sheera Frenkel \u003c/strong>[00:12:55] That’s a shift, and it’s really noticeable among the top executives. That’s something you’re really seeing at the top, and I think there is a gulf here between what executives are saying and posting and feeling about all this, and what the workforce is feeling about the direction that their companies are taking. You’ve also seen a lot of contracts signed. You’ve seen companies like OpenAI partnering with Andrel to use their AI technology to create weapons of the future. 
The question now isn’t whether the US is going to have autonomous weapons. It’s when will the US have autonomous weapons, and how quickly will companies like Google, or OpenAI, or Microsoft be able to use and pivot their AI technology to create these weapons.\u003c/p>\n\u003cp>\u003cstrong>Ericka Cruz Guevarra \u003c/strong>[00:13:46] I mean, this is making me think about Google back in 2018, as we were talking about earlier, and the role that the employees at these companies played in pushing back against this working with the US military. Are we seeing that same kind of pushback by tech employees in Silicon Valley now?\u003c/p>\n\u003cp>\u003cstrong>Sheera Frenkel \u003c/strong>[00:14:08] We are not seeing the kind of loud public pushback that we saw a little less than a decade ago. I spoke to quite a few engineers and employees at tech companies that are working with the U.S. Government who are worried. They’re sitting there and going, well, I joined this company because I believed in the ethos of connecting the world or do no evil. And now, I don’t know, I might be building an AI system that helps choose bombing targets faster for some future war, in which were you know, launching aerial strikes. I just think there’s this interesting moment where a lot of these people are asking themselves, do I feel good about the work I’m doing? But they’re doing it quietly, to be clear, because the last few years have seen a lot of layoffs across the big companies. And a lot of these people are worried for their jobs.\u003c/p>\n\u003cp>\u003cstrong>Ericka Cruz Guevarra \u003c/strong>[00:14:57] And we’ve seen that over the issue of Israel and Palestine, for example, at some of these tech companies, right? That there is real pushback happening now from the top.\u003c/p>\n\u003cp>\u003cstrong>Sheera Frenkel \u003c/strong>[00:15:08] Very much so. And a couple of the employees I spoke to looked specifically at Gaza as an example of a very AI driven war. 
I’ve written about this a lot about the systems that Israel built to be able to choose more targets to strike, to be to analyze intelligence quickly, to, you know, the facial recognition software that they’re deploying to use across Gaza. All of this are the kinds of systems that America is thinking about building. And you’re an employee, you’re looking at and you’re saying, is that the future of war?\u003c/p>\n\u003cp>\u003cstrong>Ericka Cruz Guevarra \u003c/strong>[00:15:40] I mean, Sheera, there’s obviously this moral opposition here. But I mean are there any other reasons why this collaboration between Silicon Valley and the US military is a maybe concerning trend? I mean I’m thinking about this technology and its use for surveillance in the US potentially even. I mean what are the other concerns around this?\u003c/p>\n\u003cp>\u003cstrong>Sheera Frenkel \u003c/strong>[00:16:08] I think the concerns are that you can’t put the genie back in the bottle. Technology can introduce different levels of surveillance that the US government can then choose to use as it wants to, right? And so there’s questions of how much more of a surveillance state does the US become. There are questions of, again, autonomous weapons. And every soldier I’ve met has talked about how the introduction of autonomous weapons removes one layer of humanity in war and that when it is robots firing at robots, it’s a very different war. And so there are people out there that are asking these questions of, do we want all these autonomous systems? What does that mean? Are we just making killing easier in the next conflict? And so, yes, anytime a technology is introduced, I think there’s a rush to kind of embrace that new technology. And then often a little like a beat later, like some would say a moment too late, there’s the question of, is this good?\u003c/p>\n\u003cp>\u003c/p>\u003c/div>",
"attributes": {
"named": {},
"numeric": []
}
},
{
"type": "component",
"content": "",
"name": "ad",
"attributes": {
"named": {
"label": "floatright"
},
"numeric": [
"floatright"
]
}
},
{
"type": "contentString",
"content": "\u003cdiv class=\"post-body\">\u003cp>\u003c/p>\n\u003cp>\u003cstrong>Ericka Cruz Guevarra \u003c/strong>[00:17:14] Well, Sheera, thank you so much for sharing your reporting with us.\u003c/p>\n\n\u003c/div>\u003c/p>",
"attributes": {
"named": {},
"numeric": []
}
}
],
"link": "/news/12054417/we-have-to-and-are-proud-to-big-tech-embraces-the-u-s-military",
"authors": [
"8654",
"11831",
"11649"
],
"categories": [
"news_8"
],
"tags": [
"news_25184",
"news_34755",
"news_1323",
"news_33812",
"news_250",
"news_80",
"news_33543",
"news_34586",
"news_1631",
"news_22598"
],
"featImg": "news_12054444",
"label": "source_news_12054417"
}
},
"programsReducer": {
"possible": {
"id": "possible",
"title": "Possible",
"info": "Possible is hosted by entrepreneur Reid Hoffman and writer Aria Finger. Together in Possible, Hoffman and Finger lead enlightening discussions about building a brighter collective future. The show features interviews with visionary guests like Trevor Noah, Sam Altman and Janette Sadik-Khan. Possible paints an optimistic portrait of the world we can create through science, policy, business, art and our shared humanity. It asks: What if everything goes right for once? How can we get there? Each episode also includes a short fiction story generated by advanced AI GPT-4, serving as a thought-provoking springboard to speculate how humanity could leverage technology for good.",
"airtime": "SUN 2pm",
"imageSrc": "https://cdn.kqed.org/wp-content/uploads/2024/04/Possible-Podcast-Tile-360x360-1.jpg",
"officialWebsiteLink": "https://www.possible.fm/",
"meta": {
"site": "news",
"source": "Possible"
},
"link": "/radio/program/possible",
"subscribe": {
"apple": "https://podcasts.apple.com/us/podcast/possible/id1677184070",
"spotify": "https://open.spotify.com/show/730YpdUSNlMyPQwNnyjp4k"
}
},
"1a": {
"id": "1a",
"title": "1A",
"info": "1A is home to the national conversation. 1A brings on great guests and frames the best debate in ways that make you think, share and engage.",
"airtime": "MON-THU 11pm-12am",
"imageSrc": "https://ww2.kqed.org/radio/wp-content/uploads/sites/50/2018/04/1a.jpg",
"officialWebsiteLink": "https://the1a.org/",
"meta": {
"site": "news",
"source": "npr"
},
"link": "/radio/program/1a",
"subscribe": {
"npr": "https://rpb3r.app.goo.gl/RBrW",
"apple": "https://itunes.apple.com/WebObjects/MZStore.woa/wa/viewPodcast?s=143441&mt=2&id=1188724250&at=11l79Y&ct=nprdirectory",
"tuneIn": "https://tunein.com/radio/1A-p947376/",
"rss": "https://feeds.npr.org/510316/podcast.xml"
}
},
"all-things-considered": {
"id": "all-things-considered",
"title": "All Things Considered",
"info": "Every weekday, \u003cem>All Things Considered\u003c/em> hosts Robert Siegel, Audie Cornish, Ari Shapiro, and Kelly McEvers present the program's trademark mix of news, interviews, commentaries, reviews, and offbeat features. Michel Martin hosts on the weekends.",
"airtime": "MON-FRI 1pm-2pm, 4:30pm-6:30pm\u003cbr />SAT-SUN 5pm-6pm",
"imageSrc": "https://cdn.kqed.org/wp-content/uploads/2024/04/All-Things-Considered-Podcast-Tile-360x360-1.jpg",
"officialWebsiteLink": "https://www.npr.org/programs/all-things-considered/",
"meta": {
"site": "news",
"source": "npr"
},
"link": "/radio/program/all-things-considered"
},
"american-suburb-podcast": {
"id": "american-suburb-podcast",
"title": "American Suburb: The Podcast",
"tagline": "The flip side of gentrification, told through one town",
"info": "Gentrification is changing cities across America, forcing people from neighborhoods they have long called home. Call them the displaced. Now those priced out of the Bay Area are looking for a better life in an unlikely place. American Suburb follows this migration to one California town along the Delta, 45 miles from San Francisco. But is this once sleepy suburb ready for them?",
"imageSrc": "https://cdn.kqed.org/wp-content/uploads/2024/04/American-Suburb-Podcast-Tile-703x703-1.jpg",
"officialWebsiteLink": "/news/series/american-suburb-podcast",
"meta": {
"site": "news",
"source": "kqed",
"order": 19
},
"link": "/news/series/american-suburb-podcast/",
"subscribe": {
"npr": "https://rpb3r.app.goo.gl/RBrW",
"apple": "https://itunes.apple.com/WebObjects/MZStore.woa/wa/viewPodcast?mt=2&id=1287748328",
"tuneIn": "https://tunein.com/radio/American-Suburb-p1086805/",
"rss": "https://ww2.kqed.org/news/series/american-suburb-podcast/feed/podcast",
"google": "https://podcasts.google.com/feed/aHR0cHM6Ly9mZWVkcy5tZWdhcGhvbmUuZm0vS1FJTkMzMDExODgxNjA5"
}
},
"baycurious": {
"id": "baycurious",
"title": "Bay Curious",
"tagline": "Exploring the Bay Area, one question at a time",
"info": "KQED’s new podcast, Bay Curious, gets to the bottom of the mysteries — both profound and peculiar — that give the Bay Area its unique identity. And we’ll do it with your help! You ask the questions. You decide what Bay Curious investigates. And you join us on the journey to find the answers.",
"imageSrc": "https://cdn.kqed.org/wp-content/uploads/2024/04/Bay-Curious-Podcast-Tile-703x703-1.jpg",
"imageAlt": "KQED Bay Curious",
"officialWebsiteLink": "/news/series/baycurious",
"meta": {
"site": "news",
"source": "kqed",
"order": 4
},
"link": "/podcasts/baycurious",
"subscribe": {
"apple": "https://podcasts.apple.com/us/podcast/bay-curious/id1172473406",
"npr": "https://www.npr.org/podcasts/500557090/bay-curious",
"rss": "https://ww2.kqed.org/news/category/bay-curious-podcast/feed/podcast",
"google": "https://podcasts.google.com/feed/aHR0cHM6Ly93dzIua3FlZC5vcmcvbmV3cy9jYXRlZ29yeS9iYXktY3VyaW91cy1wb2RjYXN0L2ZlZWQvcG9kY2FzdA",
"stitcher": "https://www.stitcher.com/podcast/kqed/bay-curious",
"spotify": "https://open.spotify.com/show/6O76IdmhixfijmhTZLIJ8k"
}
},
"bbc-world-service": {
"id": "bbc-world-service",
"title": "BBC World Service",
"info": "The day's top stories from BBC News compiled twice daily in the week, once at weekends.",
"airtime": "MON-FRI 9pm-10pm, TUE-FRI 1am-2am",
"imageSrc": "https://cdn.kqed.org/wp-content/uploads/2024/04/BBC-World-Service-Podcast-Tile-360x360-1.jpg",
"officialWebsiteLink": "https://www.bbc.co.uk/sounds/play/live:bbc_world_service",
"meta": {
"site": "news",
"source": "BBC World Service"
},
"link": "/radio/program/bbc-world-service",
"subscribe": {
"apple": "https://itunes.apple.com/us/podcast/global-news-podcast/id135067274?mt=2",
"tuneIn": "https://tunein.com/radio/BBC-World-Service-p455581/",
"rss": "https://podcasts.files.bbci.co.uk/p02nq0gn.rss"
}
},
"code-switch-life-kit": {
"id": "code-switch-life-kit",
"title": "Code Switch / Life Kit",
"info": "\u003cem>Code Switch\u003c/em>, which listeners will hear in the first part of the hour, has fearless and much-needed conversations about race. Hosted by journalists of color, the show tackles the subject of race head-on, exploring how it impacts every part of society — from politics and pop culture to history, sports and more.\u003cbr />\u003cbr />\u003cem>Life Kit\u003c/em>, which will be in the second part of the hour, guides you through spaces and feelings no one prepares you for — from finances to mental health, from workplace microaggressions to imposter syndrome, from relationships to parenting. The show features experts with real world experience and shares their knowledge. Because everyone needs a little help being human.\u003cbr />\u003cbr />\u003ca href=\"https://www.npr.org/podcasts/510312/codeswitch\">\u003cem>Code Switch\u003c/em> official site and podcast\u003c/a>\u003cbr />\u003ca href=\"https://www.npr.org/lifekit\">\u003cem>Life Kit\u003c/em> official site and podcast\u003c/a>\u003cbr />",
"airtime": "SUN 9pm-10pm",
"imageSrc": "https://cdn.kqed.org/wp-content/uploads/2024/04/Code-Switch-Life-Kit-Podcast-Tile-360x360-1.jpg",
"meta": {
"site": "radio",
"source": "npr"
},
"link": "/radio/program/code-switch-life-kit",
"subscribe": {
"apple": "https://podcasts.apple.com/podcast/1112190608?mt=2&at=11l79Y&ct=nprdirectory",
"google": "https://podcasts.google.com/feed/aHR0cHM6Ly93d3cubnByLm9yZy9yc3MvcG9kY2FzdC5waHA_aWQ9NTEwMzEy",
"spotify": "https://open.spotify.com/show/3bExJ9JQpkwNhoHvaIIuyV",
"rss": "https://feeds.npr.org/510312/podcast.xml"
}
},
"commonwealth-club": {
"id": "commonwealth-club",
"title": "Commonwealth Club of California Podcast",
"info": "The Commonwealth Club of California is the nation's oldest and largest public affairs forum. As a non-partisan forum, The Club brings to the public airwaves diverse viewpoints on important topics. The Club's weekly radio broadcast - the oldest in the U.S., dating back to 1924 - is carried across the nation on public radio stations and is now podcasting. Our website archive features audio of our recent programs, as well as selected speeches from our long and distinguished history. This podcast feed is usually updated twice a week and is always un-edited.",
"airtime": "THU 10pm, FRI 1am",
"imageSrc": "https://cdn.kqed.org/wp-content/uploads/2024/04/Commonwealth-Club-Podcast-Tile-360x360-1.jpg",
"officialWebsiteLink": "https://www.commonwealthclub.org/podcasts",
"meta": {
"site": "news",
"source": "Commonwealth Club of California"
},
"link": "/radio/program/commonwealth-club",
"subscribe": {
"apple": "https://itunes.apple.com/us/podcast/commonwealth-club-of-california-podcast/id976334034?mt=2",
"google": "https://podcasts.google.com/feed/aHR0cDovL3d3dy5jb21tb253ZWFsdGhjbHViLm9yZy9hdWRpby9wb2RjYXN0L3dlZWtseS54bWw",
"tuneIn": "https://tunein.com/radio/Commonwealth-Club-of-California-p1060/"
}
},
"forum": {
"id": "forum",
"title": "Forum",
"tagline": "The conversation starts here",
"info": "KQED’s live call-in program discussing local, state, national and international issues, as well as in-depth interviews.",
"airtime": "MON-FRI 9am-11am, 10pm-11pm",
"imageSrc": "https://cdn.kqed.org/wp-content/uploads/2024/04/Forum-Podcast-Tile-703x703-1.jpg",
"imageAlt": "KQED Forum with Mina Kim and Alexis Madrigal",
"officialWebsiteLink": "/forum",
"meta": {
"site": "news",
"source": "kqed",
"order": 10
},
"link": "/forum",
"subscribe": {
"apple": "https://podcasts.apple.com/us/podcast/kqeds-forum/id73329719",
"google": "https://podcasts.google.com/feed/aHR0cHM6Ly9mZWVkcy5tZWdhcGhvbmUuZm0vS1FJTkM5NTU3MzgxNjMz",
"npr": "https://www.npr.org/podcasts/432307980/forum",
"stitcher": "https://www.stitcher.com/podcast/kqedfm-kqeds-forum-podcast",
"rss": "https://feeds.megaphone.fm/KQINC9557381633"
}
},
"freakonomics-radio": {
"id": "freakonomics-radio",
"title": "Freakonomics Radio",
"info": "Freakonomics Radio is a one-hour award-winning podcast and public-radio project hosted by Stephen Dubner, with co-author Steve Levitt as a regular guest. It is produced in partnership with WNYC.",
"imageSrc": "https://ww2.kqed.org/news/wp-content/uploads/sites/10/2018/05/freakonomicsRadio.png",
"officialWebsiteLink": "http://freakonomics.com/",
"airtime": "SUN 1am-2am, SAT 3pm-4pm",
"meta": {
"site": "radio",
"source": "WNYC"
},
"link": "/radio/program/freakonomics-radio",
"subscribe": {
"npr": "https://rpb3r.app.goo.gl/4s8b",
"apple": "https://itunes.apple.com/us/podcast/freakonomics-radio/id354668519",
"tuneIn": "https://tunein.com/podcasts/WNYC-Podcasts/Freakonomics-Radio-p272293/",
"rss": "https://feeds.feedburner.com/freakonomicsradio"
}
},
"fresh-air": {
"id": "fresh-air",
"title": "Fresh Air",
"info": "Hosted by Terry Gross, \u003cem>Fresh Air from WHYY\u003c/em> is the Peabody Award-winning weekday magazine of contemporary arts and issues. One of public radio's most popular programs, Fresh Air features intimate conversations with today's biggest luminaries.",
"airtime": "MON-FRI 7pm-8pm",
"imageSrc": "https://cdn.kqed.org/wp-content/uploads/2024/04/Fresh-Air-Podcast-Tile-360x360-1.jpg",
"officialWebsiteLink": "https://www.npr.org/programs/fresh-air/",
"meta": {
"site": "radio",
"source": "npr"
},
"link": "/radio/program/fresh-air",
"subscribe": {
"npr": "https://rpb3r.app.goo.gl/4s8b",
"apple": "https://itunes.apple.com/WebObjects/MZStore.woa/wa/viewPodcast?s=143441&mt=2&id=214089682&at=11l79Y&ct=nprdirectory",
"tuneIn": "https://tunein.com/radio/Fresh-Air-p17/",
"rss": "https://feeds.npr.org/381444908/podcast.xml"
}
},
"here-and-now": {
"id": "here-and-now",
"title": "Here & Now",
"info": "A live production of NPR and WBUR Boston, in collaboration with stations across the country, Here & Now reflects the fluid world of news as it's happening in the middle of the day, with timely, in-depth news, interviews and conversation. Hosted by Robin Young, Jeremy Hobson and Tonya Mosley.",
"airtime": "MON-THU 11am-12pm",
"imageSrc": "https://cdn.kqed.org/wp-content/uploads/2024/04/Here-And-Now-Podcast-Tile-360x360-1.jpg",
"officialWebsiteLink": "http://www.wbur.org/hereandnow",
"meta": {
"site": "news",
"source": "npr"
},
"link": "/radio/program/here-and-now",
"subscribe": {
"apple": "https://itunes.apple.com/WebObjects/MZStore.woa/wa/viewPodcast?mt=2&id=426698661",
"tuneIn": "https://tunein.com/radio/Here--Now-p211/",
"rss": "https://feeds.npr.org/510051/podcast.xml"
}
},
"how-i-built-this": {
"id": "how-i-built-this",
"title": "How I Built This with Guy Raz",
"info": "Guy Raz dives into the stories behind some of the world's best known companies. How I Built This weaves a narrative journey about innovators, entrepreneurs and idealists—and the movements they built.",
"imageSrc": "https://ww2.kqed.org/news/wp-content/uploads/sites/10/2018/05/howIBuiltThis.png",
"officialWebsiteLink": "https://www.npr.org/podcasts/510313/how-i-built-this",
"airtime": "SUN 7:30pm-8pm",
"meta": {
"site": "news",
"source": "npr"
},
"link": "/radio/program/how-i-built-this",
"subscribe": {
"npr": "https://rpb3r.app.goo.gl/3zxy",
"apple": "https://itunes.apple.com/us/podcast/how-i-built-this-with-guy-raz/id1150510297?mt=2",
"tuneIn": "https://tunein.com/podcasts/Arts--Culture-Podcasts/How-I-Built-This-p910896/",
"rss": "https://feeds.npr.org/510313/podcast.xml"
}
},
"inside-europe": {
"id": "inside-europe",
"title": "Inside Europe",
"info": "Inside Europe, a one-hour weekly news magazine hosted by Helen Seeney and Keith Walker, explores the topical issues shaping the continent. No other part of the globe has experienced such dynamic political and social change in recent years.",
"airtime": "SAT 3am-4am",
"imageSrc": "https://cdn.kqed.org/wp-content/uploads/2024/04/Inside-Europe-Podcast-Tile-300x300-1.jpg",
"meta": {
"site": "news",
"source": "Deutsche Welle"
},
"link": "/radio/program/inside-europe",
"subscribe": {
"apple": "https://itunes.apple.com/us/podcast/inside-europe/id80106806?mt=2",
"tuneIn": "https://tunein.com/radio/Inside-Europe-p731/",
"rss": "https://partner.dw.com/xml/podcast_inside-europe"
}
},
"latino-usa": {
"id": "latino-usa",
"title": "Latino USA",
"airtime": "MON 1am-2am, SUN 6pm-7pm",
"info": "Latino USA, the radio journal of news and culture, is the only national, English-language radio program produced from a Latino perspective.",
"imageSrc": "https://ww2.kqed.org/radio/wp-content/uploads/sites/50/2018/04/latinoUsa.jpg",
"officialWebsiteLink": "http://latinousa.org/",
"meta": {
"site": "news",
"source": "npr"
},
"link": "/radio/program/latino-usa",
"subscribe": {
"npr": "https://rpb3r.app.goo.gl/xtTd",
"apple": "https://itunes.apple.com/WebObjects/MZStore.woa/wa/viewPodcast?s=143441&mt=2&id=79681317&at=11l79Y&ct=nprdirectory",
"tuneIn": "https://tunein.com/radio/Latino-USA-p621/",
"rss": "https://feeds.npr.org/510016/podcast.xml"
}
},
"live-from-here-highlights": {
"id": "live-from-here-highlights",
"title": "Live from Here Highlights",
"info": "Chris Thile steps to the mic as the host of Live from Here (formerly A Prairie Home Companion), a live public radio variety show. Download Chris’s Song of the Week plus other highlights from the broadcast. Produced by American Public Media.",
"airtime": "SAT 6pm-8pm, SUN 11am-1pm",
"imageSrc": "https://cdn.kqed.org/wp-content/uploads/2024/04/Live-From-Here-Podcast-Tile-360x360-1.jpg",
"officialWebsiteLink": "https://www.livefromhere.org/",
"meta": {
"site": "arts",
"source": "american public media"
},
"link": "/radio/program/live-from-here-highlights",
"subscribe": {
"apple": "https://itunes.apple.com/us/podcast/id1167173941",
"tuneIn": "https://tunein.com/radio/Live-from-Here-Highlights-p921744/",
"rss": "https://feeds.publicradio.org/public_feeds/a-prairie-home-companion-highlights/rss/rss"
}
},
"marketplace": {
"id": "marketplace",
"title": "Marketplace",
"info": "Our flagship program, helmed by Kai Ryssdal, examines what the day in money delivered, through stories, conversations, newsworthy numbers and more. Updated Monday through Friday at about 3:30 p.m. PT.",
"airtime": "MON-FRI 4pm-4:30pm, MON-WED 6:30pm-7pm",
"imageSrc": "https://cdn.kqed.org/wp-content/uploads/2024/04/Marketplace-Podcast-Tile-360x360-1.jpg",
"officialWebsiteLink": "https://www.marketplace.org/",
"meta": {
"site": "news",
"source": "American Public Media"
},
"link": "/radio/program/marketplace",
"subscribe": {
"apple": "https://itunes.apple.com/WebObjects/MZStore.woa/wa/viewPodcast?s=143441&mt=2&id=201853034&at=11l79Y&ct=nprdirectory",
"tuneIn": "https://tunein.com/radio/APM-Marketplace-p88/",
"rss": "https://feeds.publicradio.org/public_feeds/marketplace-pm/rss/rss"
}
},
"mindshift": {
"id": "mindshift",
"title": "MindShift",
"tagline": "A podcast about the future of learning and how we raise our kids",
"info": "The MindShift podcast explores the innovations in education that are shaping how kids learn. Hosts Ki Sung and Katrina Schwartz introduce listeners to educators, researchers, parents and students who are developing effective ways to improve how kids learn. We cover topics like how fed-up administrators are developing surprising tactics to deal with classroom disruptions; how listening to podcasts are helping kids develop reading skills; the consequences of overparenting; and why interdisciplinary learning can engage students on all ends of the traditional achievement spectrum. This podcast is part of the MindShift education site, a division of KQED News. KQED is an NPR/PBS member station based in San Francisco. You can also visit the MindShift website for episodes and supplemental blog posts or tweet us \u003ca href=\"https://twitter.com/MindShiftKQED\">@MindShiftKQED\u003c/a> or visit us at \u003ca href=\"/mindshift\">MindShift.KQED.org\u003c/a>",
"imageSrc": "https://cdn.kqed.org/wp-content/uploads/2024/04/Mindshift-Podcast-Tile-703x703-1.jpg",
"imageAlt": "KQED MindShift: How We Will Learn",
"officialWebsiteLink": "/mindshift/",
"meta": {
"site": "news",
"source": "kqed",
"order": 13
},
"link": "/podcasts/mindshift",
"subscribe": {
"apple": "https://podcasts.apple.com/us/podcast/mindshift-podcast/id1078765985",
"google": "https://podcasts.google.com/feed/aHR0cHM6Ly9mZWVkcy5tZWdhcGhvbmUuZm0vS1FJTkM1NzY0NjAwNDI5",
"npr": "https://www.npr.org/podcasts/464615685/mind-shift-podcast",
"stitcher": "https://www.stitcher.com/podcast/kqed/stories-teachers-share",
"spotify": "https://open.spotify.com/show/0MxSpNYZKNprFLCl7eEtyx"
}
},
"morning-edition": {
"id": "morning-edition",
"title": "Morning Edition",
"info": "\u003cem>Morning Edition\u003c/em> takes listeners around the country and the world with multi-faceted stories and commentaries every weekday. Hosts Steve Inskeep, David Greene and Rachel Martin bring you the latest breaking news and features to prepare you for the day.",
"airtime": "MON-FRI 3am-9am",
"imageSrc": "https://cdn.kqed.org/wp-content/uploads/2024/04/Morning-Edition-Podcast-Tile-360x360-1.jpg",
"officialWebsiteLink": "https://www.npr.org/programs/morning-edition/",
"meta": {
"site": "news",
"source": "npr"
},
"link": "/radio/program/morning-edition"
},
"onourwatch": {
"id": "onourwatch",
"title": "On Our Watch",
"tagline": "Deeply-reported investigative journalism",
"info": "For decades, the process for how police police themselves has been inconsistent – if not opaque. In some states, like California, these proceedings were completely hidden. After a new police transparency law unsealed scores of internal affairs files, our reporters set out to examine these cases and the shadow world of police discipline. On Our Watch brings listeners into the rooms where officers are questioned and witnesses are interrogated to find out who this system is really protecting. Is it the officers, or the public they've sworn to serve?",
"imageSrc": "https://cdn.kqed.org/wp-content/uploads/2024/04/On-Our-Watch-Podcast-Tile-703x703-1.jpg",
"imageAlt": "On Our Watch from NPR and KQED",
"officialWebsiteLink": "/podcasts/onourwatch",
"meta": {
"site": "news",
"source": "kqed",
"order": 12
},
"link": "/podcasts/onourwatch",
"subscribe": {
"apple": "https://podcasts.apple.com/podcast/id1567098962",
"google": "https://podcasts.google.com/feed/aHR0cHM6Ly9mZWVkcy5ucHIub3JnLzUxMDM2MC9wb2RjYXN0LnhtbD9zYz1nb29nbGVwb2RjYXN0cw",
"npr": "https://rpb3r.app.goo.gl/onourwatch",
"spotify": "https://open.spotify.com/show/0OLWoyizopu6tY1XiuX70x",
"tuneIn": "https://tunein.com/radio/On-Our-Watch-p1436229/",
"stitcher": "https://www.stitcher.com/show/on-our-watch",
"rss": "https://feeds.npr.org/510360/podcast.xml"
}
},
"on-the-media": {
"id": "on-the-media",
"title": "On The Media",
"info": "Our weekly podcast explores how the media 'sausage' is made, casts an incisive eye on fluctuations in the marketplace of ideas, and examines threats to the freedom of information and expression in America and abroad. For one hour a week, the show tries to lift the veil from the process of \"making media,\" especially news media, because it's through that lens that we see the world and the world sees us",
"airtime": "SUN 2pm-3pm, MON 12am-1am",
"imageSrc": "https://ww2.kqed.org/radio/wp-content/uploads/sites/50/2018/04/onTheMedia.png",
"officialWebsiteLink": "https://www.wnycstudios.org/shows/otm",
"meta": {
"site": "news",
"source": "wnyc"
},
"link": "/radio/program/on-the-media",
"subscribe": {
"apple": "https://itunes.apple.com/us/podcast/on-the-media/id73330715?mt=2",
"tuneIn": "https://tunein.com/radio/On-the-Media-p69/",
"rss": "http://feeds.wnyc.org/onthemedia"
}
},
"our-body-politic": {
"id": "our-body-politic",
"title": "Our Body Politic",
"info": "Presented by KQED, KCRW and KPCC, and created and hosted by award-winning journalist Farai Chideya, Our Body Politic is unapologetically centered on reporting on not just how women of color experience the major political events of today, but how they’re impacting those very issues.",
"airtime": "SAT 6pm-7pm, SUN 1am-2am",
"imageSrc": "https://cdn.kqed.org/wp-content/uploads/2024/04/Our-Body-Politic-Podcast-Tile-360x360-1.jpg",
"officialWebsiteLink": "https://our-body-politic.simplecast.com/",
"meta": {
"site": "news",
"source": "kcrw"
},
"link": "/radio/program/our-body-politic",
"subscribe": {
"apple": "https://podcasts.apple.com/us/podcast/our-body-politic/id1533069868",
"google": "https://podcasts.google.com/feed/aHR0cHM6Ly9mZWVkcy5zaW1wbGVjYXN0LmNvbS9feGFQaHMxcw",
"spotify": "https://open.spotify.com/show/4ApAiLT1kV153TttWAmqmc",
"rss": "https://feeds.simplecast.com/_xaPhs1s",
"tuneIn": "https://tunein.com/podcasts/News--Politics-Podcasts/Our-Body-Politic-p1369211/"
}
},
"pbs-newshour": {
"id": "pbs-newshour",
"title": "PBS NewsHour",
"info": "Analysis, background reports and updates from the PBS NewsHour putting today's news in context.",
"airtime": "MON-FRI 3pm-4pm",
"imageSrc": "https://cdn.kqed.org/wp-content/uploads/2024/04/PBS-News-Hour-Podcast-Tile-360x360-1.jpg",
"officialWebsiteLink": "https://www.pbs.org/newshour/",
"meta": {
"site": "news",
"source": "pbs"
},
"link": "/radio/program/pbs-newshour",
"subscribe": {
"apple": "https://itunes.apple.com/us/podcast/pbs-newshour-full-show/id394432287?mt=2",
"tuneIn": "https://tunein.com/radio/PBS-NewsHour---Full-Show-p425698/",
"rss": "https://www.pbs.org/newshour/feeds/rss/podcasts/show"
}
},
"perspectives": {
"id": "perspectives",
"title": "Perspectives",
"tagline": "KQED's series of daily listener commentaries since 1991",
"info": "KQED's series of daily listener commentaries since 1991.",
"imageSrc": "https://cdn.kqed.org/wp-content/uploads/2025/01/Perspectives_Tile_Final.jpg",
"officialWebsiteLink": "/perspectives/",
"meta": {
"site": "radio",
"source": "kqed",
"order": 15
},
"link": "/perspectives",
"subscribe": {
"apple": "https://podcasts.apple.com/us/podcast/id73801135",
"npr": "https://www.npr.org/podcasts/432309616/perspectives",
"rss": "https://ww2.kqed.org/perspectives/category/perspectives/feed/",
"google": "https://podcasts.google.com/feed/aHR0cHM6Ly93dzIua3FlZC5vcmcvcGVyc3BlY3RpdmVzL2NhdGVnb3J5L3BlcnNwZWN0aXZlcy9mZWVkLw"
}
},
"planet-money": {
"id": "planet-money",
"title": "Planet Money",
"info": "The economy explained. Imagine you could call up a friend and say, Meet me at the bar and tell me what's going on with the economy. Now imagine that's actually a fun evening.",
"airtime": "SUN 3pm-4pm",
"imageSrc": "https://ww2.kqed.org/radio/wp-content/uploads/sites/50/2018/04/planetmoney.jpg",
"officialWebsiteLink": "https://www.npr.org/sections/money/",
"meta": {
"site": "news",
"source": "npr"
},
"link": "/radio/program/planet-money",
"subscribe": {
"npr": "https://rpb3r.app.goo.gl/M4f5",
"apple": "https://itunes.apple.com/us/podcast/planet-money/id290783428?mt=2",
"tuneIn": "https://tunein.com/podcasts/Business--Economics-Podcasts/Planet-Money-p164680/",
"rss": "https://feeds.npr.org/510289/podcast.xml"
}
},
"politicalbreakdown": {
"id": "politicalbreakdown",
"title": "Political Breakdown",
"tagline": "Politics from a personal perspective",
"info": "Political Breakdown is a new series that explores the political intersection of California and the nation. Each week hosts Scott Shafer and Marisa Lagos are joined with a new special guest to unpack politics -- with personality — and offer an insider’s glimpse at how politics happens.",
"airtime": "THU 6:30pm-7pm",
"imageSrc": "https://cdn.kqed.org/wp-content/uploads/2024/04/Political-Breakdown-2024-Podcast-Tile-703x703-1.jpg",
"imageAlt": "KQED Political Breakdown",
"officialWebsiteLink": "/podcasts/politicalbreakdown",
"meta": {
"site": "radio",
"source": "kqed",
"order": 6
},
"link": "/podcasts/politicalbreakdown",
"subscribe": {
"apple": "https://podcasts.apple.com/us/podcast/political-breakdown/id1327641087",
"google": "https://podcasts.google.com/feed/aHR0cHM6Ly9mZWVkcy5tZWdhcGhvbmUuZm0vS1FJTkM5Nzk2MzI2MTEx",
"npr": "https://www.npr.org/podcasts/572155894/political-breakdown",
"stitcher": "https://www.stitcher.com/podcast/kqed/political-breakdown",
"spotify": "https://open.spotify.com/show/07RVyIjIdk2WDuVehvBMoN",
"rss": "https://ww2.kqed.org/news/tag/political-breakdown/feed/podcast"
}
},
"pri-the-world": {
"id": "pri-the-world",
"title": "PRI's The World: Latest Edition",
"info": "Each weekday, host Marco Werman and his team of producers bring you the world's most interesting stories in an hour of radio that reminds us just how small our planet really is.",
"airtime": "MON-FRI 2pm-3pm",
"imageSrc": "https://cdn.kqed.org/wp-content/uploads/2024/04/The-World-Podcast-Tile-360x360-1.jpg",
"officialWebsiteLink": "https://www.pri.org/programs/the-world",
"meta": {
"site": "news",
"source": "PRI"
},
"link": "/radio/program/pri-the-world",
"subscribe": {
"apple": "https://itunes.apple.com/us/podcast/pris-the-world-latest-edition/id278196007?mt=2",
"tuneIn": "https://tunein.com/podcasts/News--Politics-Podcasts/PRIs-The-World-p24/",
"rss": "http://feeds.feedburner.com/pri/theworld"
}
},
"radiolab": {
"id": "radiolab",
"title": "Radiolab",
"info": "A two-time Peabody Award-winner, Radiolab is an investigation told through sounds and stories, and centered around one big idea. In the Radiolab world, information sounds like music and science and culture collide. Hosted by Jad Abumrad and Robert Krulwich, the show is designed for listeners who demand skepticism, but appreciate wonder. WNYC Studios is the producer of other leading podcasts including Freakonomics Radio, Death, Sex & Money, On the Media and many more.",
"airtime": "SUN 12am-1am, SAT 2pm-3pm",
"imageSrc": "https://ww2.kqed.org/radio/wp-content/uploads/sites/50/2018/04/radiolab1400.png",
"officialWebsiteLink": "https://www.wnycstudios.org/shows/radiolab/",
"meta": {
"site": "science",
"source": "WNYC"
},
"link": "/radio/program/radiolab",
"subscribe": {
"apple": "https://itunes.apple.com/us/podcast/radiolab/id152249110?mt=2",
"tuneIn": "https://tunein.com/radio/RadioLab-p68032/",
"rss": "https://feeds.wnyc.org/radiolab"
}
},
"reveal": {
"id": "reveal",
"title": "Reveal",
"info": "Created by The Center for Investigative Reporting and PRX, Reveal is public radios first one-hour weekly radio show and podcast dedicated to investigative reporting. Credible, fact based and without a partisan agenda, Reveal combines the power and artistry of driveway moment storytelling with data-rich reporting on critically important issues. The result is stories that inform and inspire, arming our listeners with information to right injustices, hold the powerful accountable and improve lives.Reveal is hosted by Al Letson and showcases the award-winning work of CIR and newsrooms large and small across the nation. In a radio and podcast market crowded with choices, Reveal focuses on important and often surprising stories that illuminate the world for our listeners.",
"airtime": "SAT 4pm-5pm",
"imageSrc": "https://ww2.kqed.org/radio/wp-content/uploads/sites/50/2018/04/reveal300px.png",
"officialWebsiteLink": "https://www.revealnews.org/episodes/",
"meta": {
"site": "news",
"source": "npr"
},
"link": "/radio/program/reveal",
"subscribe": {
"apple": "https://itunes.apple.com/us/podcast/reveal/id886009669",
"tuneIn": "https://tunein.com/radio/Reveal-p679597/",
"rss": "http://feeds.revealradio.org/revealpodcast"
}
},
"says-you": {
"id": "says-you",
"title": "Says You!",
"info": "Public radio's game show of bluff and bluster, words and whimsy. The warmest, wittiest cocktail party - it's spirited and civil, brainy and boisterous, peppered with musical interludes. Fast paced and playful, it's the most fun you can have with language without getting your mouth washed out with soap. Our motto: It's not important to know the answers, it's important to like the answers!",
"airtime": "SUN 4pm-5pm",
"imageSrc": "https://cdn.kqed.org/wp-content/uploads/2024/04/Says-You-Podcast-Tile-360x360-1.jpg",
"officialWebsiteLink": "http://www.saysyouradio.com/",
"meta": {
"site": "comedy",
"source": "Pipit and Finch"
},
"link": "/radio/program/says-you",
"subscribe": {
"apple": "https://itunes.apple.com/us/podcast/says-you!/id1050199826",
"tuneIn": "https://tunein.com/radio/Says-You-p480/",
"rss": "https://saysyou.libsyn.com/rss"
}
},
"science-friday": {
"id": "science-friday",
"title": "Science Friday",
"info": "Science Friday is a weekly science talk show, broadcast live over public radio stations nationwide. Each week, the show focuses on science topics that are in the news and tries to bring an educated, balanced discussion to bear on the scientific issues at hand. Panels of expert guests join host Ira Flatow, a veteran science journalist, to discuss science and to take questions from listeners during the call-in portion of the program.",
"airtime": "FRI 11am-1pm",
"imageSrc": "https://cdn.kqed.org/wp-content/uploads/2024/04/Science-Friday-Podcast-Tile-360x360-1.jpg",
"officialWebsiteLink": "https://www.wnycstudios.org/shows/science-friday",
"meta": {
"site": "news",
"source": "npr"
},
"link": "/radio/program/science-friday",
"subscribe": {
"apple": "https://itunes.apple.com/WebObjects/MZStore.woa/wa/viewPodcast?s=143441&mt=2&id=73329284&at=11l79Y&ct=nprdirectory",
"tuneIn": "https://tunein.com/radio/Science-Friday-p394/",
"rss": "http://feeds.wnyc.org/science-friday"
}
},
"selected-shorts": {
"id": "selected-shorts",
"title": "Selected Shorts",
"info": "Spellbinding short stories by established and emerging writers take on a new life when they are performed by stars of the stage and screen.",
"airtime": "SAT 8pm-9pm",
"imageSrc": "https://cdn.kqed.org/wp-content/uploads/2024/04/Selected-Shorts-Podcast-Tile-360x360-1.jpg",
"officialWebsiteLink": "https://www.pri.org/programs/selected-shorts",
"meta": {
"site": "arts",
"source": "pri"
},
"link": "/radio/program/selected-shorts",
"subscribe": {
"apple": "https://itunes.apple.com/WebObjects/MZStore.woa/wa/viewPodcast?s=143441&mt=2&id=253191824&at=11l79Y&ct=nprdirectory",
"tuneIn": "https://tunein.com/radio/Selected-Shorts-p31792/",
"rss": "https://feeds.megaphone.fm/selectedshorts"
}
},
"snap-judgment": {
"id": "snap-judgment",
"title": "Snap Judgment",
"tagline": "Real stories with killer beats",
"info": "The Snap Judgment radio show and podcast mixes real stories with killer beats to produce cinematic, dramatic radio. Snap's musical brand of storytelling dares listeners to see the world through the eyes of another. This is storytelling... with a BEAT!! Snap first aired on public radio stations nationwide in July 2010. Today, Snap Judgment airs on over 450 public radio stations and is brought to the airwaves by KQED & PRX.",
"airtime": "SAT 1pm-2pm, 9pm-10pm",
"imageSrc": "https://cdn.kqed.org/wp-content/uploads/2024/05/Snap-Judgment-Podcast-Tile-703x703-1.jpg",
"officialWebsiteLink": "https://snapjudgment.org",
"meta": {
"site": "arts",
"source": "kqed",
"order": 5
},
"link": "https://snapjudgment.org",
"subscribe": {
"apple": "https://podcasts.apple.com/us/podcast/snap-judgment/id283657561",
"npr": "https://www.npr.org/podcasts/449018144/snap-judgment",
"stitcher": "https://www.pandora.com/podcast/snap-judgment/PC:241?source=stitcher-sunset",
"spotify": "https://open.spotify.com/show/3Cct7ZWmxHNAtLgBTqjC5v",
"rss": "https://snap.feed.snapjudgment.org/"
}
},
"soldout": {
"id": "soldout",
"title": "SOLD OUT: Rethinking Housing in America",
"tagline": "A new future for housing",
"info": "Sold Out: Rethinking Housing in America",
"imageSrc": "https://cdn.kqed.org/wp-content/uploads/2024/04/Sold-Out-Podcast-Tile-703x703-1.jpg",
"imageAlt": "KQED Sold Out: Rethinking Housing in America",
"officialWebsiteLink": "/podcasts/soldout",
"meta": {
"site": "news",
"source": "kqed",
"order": 14
},
"link": "/podcasts/soldout",
"subscribe": {
"npr": "https://www.npr.org/podcasts/911586047/s-o-l-d-o-u-t-a-new-future-for-housing",
"apple": "https://podcasts.apple.com/us/podcast/introducing-sold-out-rethinking-housing-in-america/id1531354937",
"rss": "https://feeds.megaphone.fm/soldout",
"spotify": "https://open.spotify.com/show/38dTBSk2ISFoPiyYNoKn1X",
"stitcher": "https://www.stitcher.com/podcast/kqed/sold-out-rethinking-housing-in-america",
"tunein": "https://tunein.com/radio/SOLD-OUT-Rethinking-Housing-in-America-p1365871/",
"google": "https://podcasts.google.com/feed/aHR0cHM6Ly9mZWVkcy5tZWdhcGhvbmUuZm0vc29sZG91dA"
}
},
"spooked": {
"id": "spooked",
"title": "Spooked",
"tagline": "True-life supernatural stories",
"info": "",
"imageSrc": "https://cdn.kqed.org/wp-content/uploads/2024/10/Spooked-Podcast-Tile-703x703-1.jpg",
"imageAlt": "",
"officialWebsiteLink": "https://spookedpodcast.org/",
"meta": {
"site": "news",
"source": "kqed",
"order": 8
},
"link": "https://spookedpodcast.org/",
"subscribe": {
"apple": "https://podcasts.apple.com/us/podcast/spooked/id1279361017",
"npr": "https://www.npr.org/podcasts/549547848/snap-judgment-presents-spooked",
"spotify": "https://open.spotify.com/show/76571Rfl3m7PLJQZKQIGCT",
"rss": "https://feeds.simplecast.com/TBotaapn"
}
},
"ted-radio-hour": {
"id": "ted-radio-hour",
"title": "TED Radio Hour",
"info": "The TED Radio Hour is a journey through fascinating ideas, astonishing inventions, fresh approaches to old problems, and new ways to think and create.",
"airtime": "SUN 3pm-4pm, SAT 10pm-11pm",
"imageSrc": "https://ww2.kqed.org/radio/wp-content/uploads/sites/50/2018/04/tedRadioHour.jpg",
"officialWebsiteLink": "https://www.npr.org/programs/ted-radio-hour/?showDate=2018-06-22",
"meta": {
"site": "news",
"source": "npr"
},
"link": "/radio/program/ted-radio-hour",
"subscribe": {
"npr": "https://rpb3r.app.goo.gl/8vsS",
"apple": "https://itunes.apple.com/WebObjects/MZStore.woa/wa/viewPodcast?s=143441&mt=2&id=523121474&at=11l79Y&ct=nprdirectory",
"tuneIn": "https://tunein.com/radio/TED-Radio-Hour-p418021/",
"rss": "https://feeds.npr.org/510298/podcast.xml"
}
},
"tech-nation": {
"id": "tech-nation",
"title": "Tech Nation Radio Podcast",
"info": "Tech Nation is a weekly public radio program, hosted by Dr. Moira Gunn. Founded in 1993, it has grown from a simple interview show to a multi-faceted production, featuring conversations with noted technology and science leaders, and a weekly science and technology-related commentary.",
"airtime": "FRI 10pm",
"imageSrc": "https://cdn.kqed.org/wp-content/uploads/2024/04/Tech-Nation-Radio-Podcast-Tile-360x360-1.jpg",
"officialWebsiteLink": "http://technation.podomatic.com/",
"meta": {
"site": "science",
"source": "Tech Nation Media"
},
"link": "/radio/program/tech-nation",
"subscribe": {
"rss": "https://technation.podomatic.com/rss2.xml"
}
},
"thebay": {
"id": "thebay",
"title": "The Bay",
"tagline": "Local news to keep you rooted",
"info": "Host Devin Katayama walks you through the biggest story of the day with reporters and newsmakers.",
"imageSrc": "https://cdn.kqed.org/wp-content/uploads/2024/04/The-Bay-Podcast-Tile-703x703-1.jpg",
"imageAlt": "KQED The Bay",
"officialWebsiteLink": "/podcasts/thebay",
"meta": {
"site": "radio",
"source": "kqed",
"order": 3
},
"link": "/podcasts/thebay",
"subscribe": {
"apple": "https://podcasts.apple.com/us/podcast/the-bay/id1350043452",
"google": "https://podcasts.google.com/feed/aHR0cHM6Ly9mZWVkcy5tZWdhcGhvbmUuZm0vS1FJTkM4MjU5Nzg2MzI3",
"npr": "https://www.npr.org/podcasts/586725995/the-bay",
"stitcher": "https://www.stitcher.com/podcast/kqed/the-bay",
"spotify": "https://open.spotify.com/show/4BIKBKIujizLHlIlBNaAqQ",
"rss": "https://feeds.megaphone.fm/KQINC8259786327"
}
},
"californiareport": {
"id": "californiareport",
"title": "The California Report",
"tagline": "California, day by day",
"info": "KQED’s statewide radio news program providing daily coverage of issues, trends and public policy decisions.",
"imageSrc": "https://cdn.kqed.org/wp-content/uploads/2024/04/The-California-Report-Podcast-Tile-703x703-1.jpg",
"imageAlt": "KQED The California Report",
"officialWebsiteLink": "/californiareport",
"meta": {
"site": "news",
"source": "kqed",
"order": 9
},
"link": "/californiareport",
"subscribe": {
"apple": "https://podcasts.apple.com/us/podcast/kqeds-the-california-report/id79681292",
"google": "https://podcasts.google.com/feed/aHR0cHM6Ly9mZWVkcy5tZWdhcGhvbmUuZm0vS1FJTkM1MDAyODE4NTgz",
"npr": "https://www.npr.org/podcasts/432285393/the-california-report",
"stitcher": "https://www.stitcher.com/podcast/kqedfm-kqeds-the-california-report-podcast-8838",
"rss": "https://ww2.kqed.org/news/tag/tcram/feed/podcast"
}
},
"californiareportmagazine": {
"id": "californiareportmagazine",
"title": "The California Report Magazine",
"tagline": "Your state, your stories",
"info": "Every week, The California Report Magazine takes you on a road trip for the ears: to visit the places and meet the people who make California unique. The in-depth storytelling podcast from the California Report.",
"airtime": "FRI 4:30pm-5pm, 6:30pm-7pm, 11pm-11:30pm",
"imageSrc": "https://cdn.kqed.org/wp-content/uploads/2024/04/The-California-Report-Magazine-Podcast-Tile-703x703-1.jpg",
"imageAlt": "KQED The California Report Magazine",
"officialWebsiteLink": "/californiareportmagazine",
"meta": {
"site": "news",
"source": "kqed",
"order": 11
},
"link": "/californiareportmagazine",
"subscribe": {
"apple": "https://podcasts.apple.com/us/podcast/the-california-report-magazine/id1314750545",
"google": "https://podcasts.google.com/feed/aHR0cHM6Ly9mZWVkcy5tZWdhcGhvbmUuZm0vS1FJTkM3NjkwNjk1OTAz",
"npr": "https://www.npr.org/podcasts/564733126/the-california-report-magazine",
"stitcher": "https://www.stitcher.com/podcast/kqed/the-california-report-magazine",
"rss": "https://ww2.kqed.org/news/tag/tcrmag/feed/podcast"
}
},
"closealltabs": {
"id": "closealltabs",
"title": "Close All Tabs",
"tagline": "Your irreverent guide to the trends redefining our world",
"info": "Close All Tabs breaks down how digital culture shapes our world through thoughtful insights and irreverent humor.",
"imageSrc": "https://cdn.kqed.org/wp-content/uploads/2025/02/CAT_2_Tile-scaled.jpg",
"imageAlt": "\"KQED Close All Tabs",
"officialWebsiteLink": "/podcasts/closealltabs",
"meta": {
"site": "news",
"source": "kqed",
"order": 2
},
"link": "/podcasts/closealltabs",
"subscribe": {
"apple": "https://podcasts.apple.com/us/podcast/close-all-tabs/id214663465",
"rss": "https://feeds.megaphone.fm/KQINC6993880386",
"amazon": "https://music.amazon.com/podcasts/92d9d4ac-67a3-4eed-b10a-fb45d45b1ef2/close-all-tabs",
"spotify": "https://open.spotify.com/show/6LAJFHnGK1pYXYzv6SIol6?si=deb0cae19813417c"
}
},
"thelatest": {
"id": "thelatest",
"title": "The Latest",
"tagline": "Trusted local news in real time",
"info": "",
"imageSrc": "https://cdn.kqed.org/wp-content/uploads/2025/05/The-Latest-2025-Podcast-Tile-703x703-1.jpg",
"imageAlt": "KQED The Latest",
"officialWebsiteLink": "/thelatest",
"meta": {
"site": "news",
"source": "kqed",
"order": 7
},
"link": "/thelatest",
"subscribe": {
"apple": "https://podcasts.apple.com/us/podcast/the-latest-from-kqed/id1197721799",
"npr": "https://www.npr.org/podcasts/1257949365/the-latest-from-k-q-e-d",
"spotify": "https://open.spotify.com/show/5KIIXMgM9GTi5AepwOYvIZ?si=bd3053fec7244dba",
"rss": "https://feeds.megaphone.fm/KQINC9137121918"
}
},
"theleap": {
"id": "theleap",
"title": "The Leap",
"tagline": "What if you closed your eyes, and jumped?",
"info": "Stories about people making dramatic, risky changes, told by award-winning public radio reporter Judy Campbell.",
"imageSrc": "https://cdn.kqed.org/wp-content/uploads/2024/04/The-Leap-Podcast-Tile-703x703-1.jpg",
"imageAlt": "KQED The Leap",
"officialWebsiteLink": "/podcasts/theleap",
"meta": {
"site": "news",
"source": "kqed",
"order": 17
},
"link": "/podcasts/theleap",
"subscribe": {
"apple": "https://podcasts.apple.com/us/podcast/the-leap/id1046668171",
"google": "https://podcasts.google.com/feed/aHR0cHM6Ly9mZWVkcy5tZWdhcGhvbmUuZm0vS1FJTkM0NTcwODQ2MjY2",
"npr": "https://www.npr.org/podcasts/447248267/the-leap",
"stitcher": "https://www.stitcher.com/podcast/kqed/the-leap",
"spotify": "https://open.spotify.com/show/3sSlVHHzU0ytLwuGs1SD1U",
"rss": "https://ww2.kqed.org/news/programs/the-leap/feed/podcast"
}
},
"masters-of-scale": {
"id": "masters-of-scale",
"title": "Masters of Scale",
"info": "Masters of Scale is an original podcast in which LinkedIn co-founder and Greylock Partner Reid Hoffman sets out to describe and prove theories that explain how great entrepreneurs take their companies from zero to a gazillion in ingenious fashion.",
"airtime": "Every other Wednesday June 12 through October 16 at 8pm (repeats Thursdays at 2am)",
"imageSrc": "https://cdn.kqed.org/wp-content/uploads/2024/04/Masters-of-Scale-Podcast-Tile-360x360-1.jpg",
"officialWebsiteLink": "https://mastersofscale.com/",
"meta": {
"site": "radio",
"source": "WaitWhat"
},
"link": "/radio/program/masters-of-scale",
"subscribe": {
"apple": "http://mastersofscale.app.link/",
"rss": "https://rss.art19.com/masters-of-scale"
}
},
"the-moth-radio-hour": {
"id": "the-moth-radio-hour",
"title": "The Moth Radio Hour",
"info": "Since its launch in 1997, The Moth has presented thousands of true stories, told live and without notes, to standing-room-only crowds worldwide. Moth storytellers stand alone, under a spotlight, with only a microphone and a roomful of strangers. The storyteller and the audience embark on a high-wire act of shared experience which is both terrifying and exhilarating. Since 2008, The Moth podcast has featured many of our favorite stories told live on Moth stages around the country. For information on all of our programs and live events, visit themoth.org.",
"airtime": "SAT 8pm-9pm and SUN 11am-12pm",
"imageSrc": "https://ww2.kqed.org/radio/wp-content/uploads/sites/50/2018/04/theMoth.jpg",
"officialWebsiteLink": "https://themoth.org/",
"meta": {
"site": "arts",
"source": "prx"
},
"link": "/radio/program/the-moth-radio-hour",
"subscribe": {
"apple": "https://itunes.apple.com/us/podcast/the-moth-podcast/id275699983?mt=2",
"tuneIn": "https://tunein.com/radio/The-Moth-p273888/",
"rss": "http://feeds.themoth.org/themothpodcast"
}
},
"the-new-yorker-radio-hour": {
"id": "the-new-yorker-radio-hour",
"title": "The New Yorker Radio Hour",
"info": "The New Yorker Radio Hour is a weekly program presented by the magazine's editor, David Remnick, and produced by WNYC Studios and The New Yorker. Each episode features a diverse mix of interviews, profiles, storytelling, and an occasional burst of humor inspired by the magazine, and shaped by its writers, artists, and editors. This isn't a radio version of a magazine, but something all its own, reflecting the rich possibilities of audio storytelling and conversation. Theme music for the show was composed and performed by Merrill Garbus of tUnE-YArDs.",
"airtime": "SAT 10am-11am",
"imageSrc": "https://cdn.kqed.org/wp-content/uploads/2024/04/The-New-Yorker-Podcast-Tile-360x360-1.jpg",
"officialWebsiteLink": "https://www.wnycstudios.org/shows/tnyradiohour",
"meta": {
"site": "arts",
"source": "WNYC"
},
"link": "/radio/program/the-new-yorker-radio-hour",
"subscribe": {
"apple": "https://itunes.apple.com/us/podcast/id1050430296",
"tuneIn": "https://tunein.com/podcasts/WNYC-Podcasts/New-Yorker-Radio-Hour-p803804/",
"rss": "https://feeds.feedburner.com/newyorkerradiohour"
}
},
"the-takeaway": {
"id": "the-takeaway",
"title": "The Takeaway",
"info": "The Takeaway is produced in partnership with its national audience. It delivers perspective and analysis to help us better understand the day’s news. Be a part of the American conversation on-air and online.",
"airtime": "MON-THU 12pm-1pm",
"imageSrc": "https://cdn.kqed.org/wp-content/uploads/2024/04/The-Takeaway-Podcast-Tile-360x360-1.jpg",
"officialWebsiteLink": "https://www.wnycstudios.org/shows/takeaway",
"meta": {
"site": "news",
"source": "WNYC"
},
"link": "/radio/program/the-takeaway",
"subscribe": {
"apple": "https://itunes.apple.com/us/podcast/the-takeaway/id363143310?mt=2",
"tuneIn": "http://tunein.com/radio/The-Takeaway-p150731/",
"rss": "https://feeds.feedburner.com/takeawaypodcast"
}
},
"this-american-life": {
"id": "this-american-life",
"title": "This American Life",
"info": "This American Life is a weekly public radio show, heard by 2.2 million people on more than 500 stations. Another 2.5 million people download the weekly podcast. It is hosted by Ira Glass, produced in collaboration with Chicago Public Media, delivered to stations by PRX The Public Radio Exchange, and has won all of the major broadcasting awards.",
"airtime": "SAT 12pm-1pm, 7pm-8pm",
"imageSrc": "https://ww2.kqed.org/radio/wp-content/uploads/sites/50/2018/04/thisAmericanLife.png",
"officialWebsiteLink": "https://www.thisamericanlife.org/",
"meta": {
"site": "news",
"source": "wbez"
},
"link": "/radio/program/this-american-life",
"subscribe": {
"apple": "https://itunes.apple.com/WebObjects/MZStore.woa/wa/viewPodcast?s=143441&mt=2&id=201671138&at=11l79Y&ct=nprdirectory",
"rss": "https://www.thisamericanlife.org/podcast/rss.xml"
}
},
"truthbetold": {
"id": "truthbetold",
"title": "Truth Be Told",
"tagline": "Advice by and for people of color",
"info": "We’re the friend you call after a long day, the one who gets it. Through wisdom from some of the greatest thinkers of our time, host Tonya Mosley explores what it means to grow and thrive as a Black person in America, while discovering new ways of being that serve as a portal to more love, more healing, and more joy.",
"airtime": "",
"imageSrc": "https://cdn.kqed.org/wp-content/uploads/2024/04/Truth-Be-Told-Podcast-Tile-360x360-1.jpg",
"imageAlt": "KQED Truth Be Told with Tonya Mosley",
"officialWebsiteLink": "https://www.kqed.ord/podcasts/truthbetold",
"meta": {
"site": "news",
"source": "npr"
},
"link": "/podcasts/truthbetold",
"subscribe": {
"apple": "https://podcasts.apple.com/us/podcast/truth-be-told/id1462216572",
"google": "https://podcasts.google.com/feed/aHR0cHM6Ly93dzIua3FlZC5vcmcvbmV3cy9jYXRlZ29yeS90cnV0aC1iZS10b2xkLXBvZGNhc3QvZmVlZA",
"npr": "https://www.npr.org/podcasts/719210818/truth-be-told",
"stitcher": "https://www.stitcher.com/s?fid=398170&refid=stpr",
"spotify": "https://open.spotify.com/show/587DhwTBxke6uvfwDfaV5N"
}
},
"wait-wait-dont-tell-me": {
"id": "wait-wait-dont-tell-me",
"title": "Wait Wait... Don't Tell Me!",
"info": "Peter Sagal and Bill Kurtis host the weekly NPR News quiz show alongside some of the best and brightest news and entertainment personalities.",
"airtime": "SUN 10am-11am, SAT 11am-12pm, SAT 6pm-7pm",
"imageSrc": "https://cdn.kqed.org/wp-content/uploads/2024/04/Wait-Wait-Podcast-Tile-300x300-1.jpg",
"officialWebsiteLink": "https://www.npr.org/programs/wait-wait-dont-tell-me/",
"meta": {
"site": "news",
"source": "npr"
},
"link": "/radio/program/wait-wait-dont-tell-me",
"subscribe": {
"npr": "https://rpb3r.app.goo.gl/Xogv",
"apple": "https://itunes.apple.com/WebObjects/MZStore.woa/wa/viewPodcast?s=143441&mt=2&id=121493804&at=11l79Y&ct=nprdirectory",
"tuneIn": "https://tunein.com/radio/Wait-Wait-Dont-Tell-Me-p46/",
"rss": "https://feeds.npr.org/344098539/podcast.xml"
}
},
"washington-week": {
"id": "washington-week",
"title": "Washington Week",
"info": "For 50 years, Washington Week has been the most intelligent and up to date conversation about the most important news stories of the week. Washington Week is the longest-running news and public affairs program on PBS and features journalists -- not pundits -- lending insight and perspective to the week's important news stories.",
"airtime": "SAT 1:30am-2am",
"imageSrc": "https://ww2.kqed.org/radio/wp-content/uploads/sites/50/2018/04/washington-week.jpg",
"officialWebsiteLink": "http://www.pbs.org/weta/washingtonweek/",
"meta": {
"site": "news",
"source": "pbs"
},
"link": "/radio/program/washington-week",
"subscribe": {
"apple": "https://itunes.apple.com/us/podcast/washington-week-audio-pbs/id83324702?mt=2",
"tuneIn": "https://tunein.com/podcasts/Current-Affairs/Washington-Week-p693/",
"rss": "http://feeds.pbs.org/pbs/weta/washingtonweek-audio"
}
},
"weekend-edition-saturday": {
"id": "weekend-edition-saturday",
"title": "Weekend Edition Saturday",
"info": "Weekend Edition Saturday wraps up the week's news and offers a mix of analysis and features on a wide range of topics, including arts, sports, entertainment, and human interest stories. The two-hour program is hosted by NPR's Peabody Award-winning Scott Simon.",
"airtime": "SAT 5am-10am",
"imageSrc": "https://cdn.kqed.org/wp-content/uploads/2024/04/Weekend-Edition-Podcast-Tile-360x360-1.jpg",
"officialWebsiteLink": "https://www.npr.org/programs/weekend-edition-saturday/",
"meta": {
"site": "news",
"source": "npr"
},
"link": "/radio/program/weekend-edition-saturday"
},
"weekend-edition-sunday": {
"id": "weekend-edition-sunday",
"title": "Weekend Edition Sunday",
"info": "Weekend Edition Sunday features interviews with newsmakers, artists, scientists, politicians, musicians, writers, theologians and historians. The program has covered news events from Nelson Mandela's 1990 release from a South African prison to the capture of Saddam Hussein.",
"airtime": "SUN 5am-10am",
"imageSrc": "https://cdn.kqed.org/wp-content/uploads/2024/04/Weekend-Edition-Podcast-Tile-360x360-1.jpg",
"officialWebsiteLink": "https://www.npr.org/programs/weekend-edition-sunday/",
"meta": {
"site": "news",
"source": "npr"
},
"link": "/radio/program/weekend-edition-sunday"
},
"world-affairs": {
"id": "world-affairs",
"title": "World Affairs",
"info": "The world as we knew it is undergoing a rapid transformation…so what's next? Welcome to WorldAffairs, your guide to a changing world. We give you the context you need to navigate across borders and ideologies. Through sound-rich stories and in-depth interviews, we break down what it means to be a global citizen on a hot, crowded planet. Our hosts, Ray Suarez, Teresa Cotsirilos and Philip Yun help you make sense of an uncertain world, one story at a time.",
"airtime": "MON 10pm, TUE 1am, SAT 3am",
"imageSrc": "https://cdn.kqed.org/wp-content/uploads/2024/04/World-Affairs-Podcast-Tile-360x360-1.jpg",
"officialWebsiteLink": "https://www.worldaffairs.org/",
"meta": {
"site": "news",
"source": "World Affairs"
},
"link": "/radio/program/world-affairs",
"subscribe": {
"apple": "https://itunes.apple.com/us/podcast/world-affairs/id101215657?mt=2",
"tuneIn": "https://tunein.com/radio/WorldAffairs-p1665/",
"rss": "https://worldaffairs.libsyn.com/rss"
}
},
"on-shifting-ground": {
"id": "on-shifting-ground",
"title": "On Shifting Ground with Ray Suarez",
"info": "Geopolitical turmoil. A warming planet. Authoritarians on the rise. We live in a chaotic world that’s rapidly shifting around us. “On Shifting Ground with Ray Suarez” explores international fault lines and how they impact us all. Each week, NPR veteran Ray Suarez hosts conversations with journalists, leaders and policy experts to help us read between the headlines – and give us hope for human resilience.",
"airtime": "MON 10pm, TUE 1am, SAT 3am",
"imageSrc": "https://ww2.kqed.org/app/uploads/2022/12/onshiftingground-600x600-1.png",
"officialWebsiteLink": "https://worldaffairs.org/radio-podcast/",
"meta": {
"site": "news",
"source": "On Shifting Ground"
},
"link": "/radio/program/on-shifting-ground",
"subscribe": {
"apple": "https://podcasts.apple.com/ie/podcast/on-shifting-ground/id101215657",
"rss": "https://feeds.libsyn.com/36668/rss"
}
},
"hidden-brain": {
"id": "hidden-brain",
"title": "Hidden Brain",
"info": "Shankar Vedantam uses science and storytelling to reveal the unconscious patterns that drive human behavior, shape our choices and direct our relationships.",
"imageSrc": "https://ww2.kqed.org/radio/wp-content/uploads/sites/50/2018/05/hiddenbrain.jpg",
"officialWebsiteLink": "https://www.npr.org/series/423302056/hidden-brain",
"airtime": "SUN 7pm-8pm",
"meta": {
"site": "news",
"source": "NPR"
},
"link": "/radio/program/hidden-brain",
"subscribe": {
"apple": "https://itunes.apple.com/us/podcast/hidden-brain/id1028908750?mt=2",
"tuneIn": "https://tunein.com/podcasts/Science-Podcasts/Hidden-Brain-p787503/",
"rss": "https://feeds.npr.org/510308/podcast.xml"
}
},
"hyphenacion": {
"id": "hyphenacion",
"title": "Hyphenación",
"tagline": "Where conversation and cultura meet",
"info": "What kind of no sabo word is Hyphenación? For us, it’s about living within a hyphenation. Like being a third-gen Mexican-American from the Texas border now living that Bay Area Chicano life. Like Xorje! Each week we bring together a couple of hyphenated Latinos to talk all about personal life choices: family, careers, relationships, belonging … everything is on the table. ",
"imageSrc": "https://cdn.kqed.org/wp-content/uploads/2025/03/Hyphenacion_FinalAssets_PodcastTile.png",
"imageAlt": "KQED Hyphenación",
"officialWebsiteLink": "/podcasts/hyphenacion",
"meta": {
"site": "news",
"source": "kqed",
"order": 1
},
"link": "/podcasts/hyphenacion",
"subscribe": {
"apple": "https://podcasts.apple.com/us/podcast/hyphenaci%C3%B3n/id1191591838",
"spotify": "https://open.spotify.com/show/2p3Fifq96nw9BPcmFdIq0o?si=39209f7b25774f38",
"youtube": "https://www.youtube.com/c/kqedarts",
"amazon": "https://music.amazon.com/podcasts/6c3dd23c-93fb-4aab-97ba-1725fa6315f1/hyphenaci%C3%B3n",
"rss": "https://feeds.megaphone.fm/KQINC2275451163"
}
},
"city-arts": {
"id": "city-arts",
"title": "City Arts & Lectures",
"info": "A one-hour radio program to hear celebrated writers, artists and thinkers address contemporary ideas and values, often discussing the creative process. Please note: tapes or transcripts are not available",
"imageSrc": "https://ww2.kqed.org/radio/wp-content/uploads/sites/50/2018/05/cityartsandlecture-300x300.jpg",
"officialWebsiteLink": "https://www.cityarts.net/",
"airtime": "SUN 1pm-2pm, TUE 10pm, WED 1am",
"meta": {
"site": "news",
"source": "City Arts & Lectures"
},
"link": "https://www.cityarts.net",
"subscribe": {
"tuneIn": "https://tunein.com/radio/City-Arts-and-Lectures-p692/",
"rss": "https://www.cityarts.net/feed/"
}
},
"white-lies": {
"id": "white-lies",
"title": "White Lies",
"info": "In 1965, Rev. James Reeb was murdered in Selma, Alabama. Three men were tried and acquitted, but no one was ever held to account. Fifty years later, two journalists from Alabama return to the city where it happened, expose the lies that kept the murder from being solved and uncover a story about guilt and memory that says as much about America today as it does about the past.",
"imageSrc": "https://cdn.kqed.org/wp-content/uploads/2024/04/White-Lies-Podcast-Tile-360x360-1.jpg",
"officialWebsiteLink": "https://www.npr.org/podcasts/510343/white-lies",
"meta": {
"site": "news",
"source": "npr"
},
"link": "/radio/program/white-lies",
"subscribe": {
"npr": "https://rpb3r.app.goo.gl/whitelies",
"apple": "https://podcasts.apple.com/podcast/id1462650519?mt=2&at=11l79Y&ct=nprdirectory",
"google": "https://podcasts.google.com/feed/aHR0cHM6Ly9mZWVkcy5ucHIub3JnLzUxMDM0My9wb2RjYXN0LnhtbA",
"spotify": "https://open.spotify.com/show/12yZ2j8vxqhc0QZyRES3ft?si=LfWYEK6URA63hueKVxRLAw",
"rss": "https://feeds.npr.org/510343/podcast.xml"
}
},
"rightnowish": {
"id": "rightnowish",
"title": "Rightnowish",
"tagline": "Art is where you find it",
"info": "Rightnowish digs into life in the Bay Area right now… ish. Journalist Pendarvis Harshaw takes us to galleries painted on the sides of liquor stores in West Oakland. We'll dance in warehouses in the Bayview, make smoothies with kids in South Berkeley, and listen to classical music in a 1984 Cutlass Supreme in Richmond. Every week, Pen talks to movers and shakers about how the Bay Area shapes what they create, and how they shape the place we call home.",
"imageSrc": "https://cdn.kqed.org/wp-content/uploads/2024/04/Rightnowish-Podcast-Tile-500x500-1.jpg",
"imageAlt": "KQED Rightnowish with Pendarvis Harshaw",
"officialWebsiteLink": "/podcasts/rightnowish",
"meta": {
"site": "arts",
"source": "kqed",
"order": 16
},
"link": "/podcasts/rightnowish",
"subscribe": {
"npr": "https://www.npr.org/podcasts/721590300/rightnowish",
"rss": "https://ww2.kqed.org/arts/programs/rightnowish/feed/podcast",
"apple": "https://podcasts.apple.com/us/podcast/rightnowish/id1482187648",
"stitcher": "https://www.stitcher.com/podcast/kqed/rightnowish",
"google": "https://podcasts.google.com/feed/aHR0cHM6Ly9mZWVkcy5tZWdhcGhvbmUuZm0vS1FJTkMxMjU5MTY3NDc4",
"spotify": "https://open.spotify.com/show/7kEJuafTzTVan7B78ttz1I"
}
},
"jerrybrown": {
"id": "jerrybrown",
"title": "The Political Mind of Jerry Brown",
"tagline": "Lessons from a lifetime in politics",
"info": "The Political Mind of Jerry Brown brings listeners the wisdom of the former Governor, Mayor, and presidential candidate. Scott Shafer interviewed Brown for more than 40 hours, covering the former governor's life and half-century in the political game and Brown has some lessons he'd like to share. ",
"imageSrc": "https://cdn.kqed.org/wp-content/uploads/2024/04/The-Political-Mind-of-Jerry-Brown-Podcast-Tile-703x703-1.jpg",
"imageAlt": "KQED The Political Mind of Jerry Brown",
"officialWebsiteLink": "/podcasts/jerrybrown",
"meta": {
"site": "news",
"source": "kqed",
"order": 18
},
"link": "/podcasts/jerrybrown",
"subscribe": {
"npr": "https://www.npr.org/podcasts/790253322/the-political-mind-of-jerry-brown",
"apple": "https://itunes.apple.com/us/podcast/id1492194549",
"rss": "https://ww2.kqed.org/news/series/jerrybrown/feed/podcast/",
"tuneIn": "http://tun.in/pjGcK",
"stitcher": "https://www.stitcher.com/podcast/kqed/the-political-mind-of-jerry-brown",
"spotify": "https://open.spotify.com/show/54C1dmuyFyKMFttY6X2j6r?si=K8SgRCoISNK6ZbjpXrX5-w",
"google": "https://podcasts.google.com/feed/aHR0cHM6Ly93dzIua3FlZC5vcmcvbmV3cy9zZXJpZXMvamVycnlicm93bi9mZWVkL3BvZGNhc3Qv"
}
},
"tinydeskradio": {
"id": "tinydeskradio",
"title": "Tiny Desk Radio",
"info": "We're bringing the best of Tiny Desk to the airwaves, only on public radio.",
"airtime": "SUN 8pm and SAT 9pm",
"imageSrc": "https://cdn.kqed.org/wp-content/uploads/2025/04/300x300-For-Member-Station-Logo-Tiny-Desk-Radio-@2x.png",
"officialWebsiteLink": "https://www.npr.org/series/g-s1-52030/tiny-desk-radio",
"meta": {
"site": "news",
"source": "npr"
},
"link": "/radio/program/tinydeskradio",
"subscribe": {
"rss": "https://feeds.npr.org/g-s1-52030/rss.xml"
}
},
"the-splendid-table": {
"id": "the-splendid-table",
"title": "The Splendid Table",
"info": "\u003cem>The Splendid Table\u003c/em> hosts our nation's conversations about cooking, sustainability and food culture.",
"imageSrc": "https://cdn.kqed.org/wp-content/uploads/2024/04/The-Splendid-Table-Podcast-Tile-360x360-1.jpg",
"officialWebsiteLink": "https://www.splendidtable.org/",
"airtime": "SUN 10-11 pm",
"meta": {
"site": "radio",
"source": "npr"
},
"link": "/radio/program/the-splendid-table"
}
},
"racesReducer": {},
"racesGenElectionReducer": {},
"radioSchedulesReducer": {},
"listsReducer": {
"posts/news?tag=artificial-intelligence": {
"isFetching": false,
"latestQuery": {
"from": 0,
"postsToRender": 9
},
"tag": null,
"vitalsOnly": true,
"totalRequested": 9,
"isLoading": false,
"isLoadingMore": true,
"total": {
"value": 48,
"relation": "eq"
},
"items": [
"news_12065748",
"news_12063587",
"news_12061462",
"news_12060365",
"news_12059911",
"news_12059714",
"news_12058013",
"news_12055125",
"news_12054417"
]
}
},
"recallGuideReducer": {
"intros": {},
"policy": {},
"candidates": {}
},
"savedArticleReducer": {
"articles": [],
"status": {}
},
"pfsSessionReducer": {},
"subscriptionsReducer": {},
"termsReducer": {
"about": {
"name": "About",
"type": "terms",
"id": "about",
"slug": "about",
"link": "/about",
"taxonomy": "site"
},
"arts": {
"name": "Arts & Culture",
"grouping": [
"arts",
"pop",
"trulyca"
],
"description": "KQED Arts provides daily in-depth coverage of the Bay Area's music, art, film, performing arts, literature and arts news, as well as cultural commentary and criticism.",
"type": "terms",
"id": "arts",
"slug": "arts",
"link": "/arts",
"taxonomy": "site"
},
"artschool": {
"name": "Art School",
"parent": "arts",
"type": "terms",
"id": "artschool",
"slug": "artschool",
"link": "/artschool",
"taxonomy": "site"
},
"bayareabites": {
"name": "KQED food",
"grouping": [
"food",
"bayareabites",
"checkplease"
],
"parent": "food",
"type": "terms",
"id": "bayareabites",
"slug": "bayareabites",
"link": "/food",
"taxonomy": "site"
},
"bayareahiphop": {
"name": "Bay Area Hiphop",
"type": "terms",
"id": "bayareahiphop",
"slug": "bayareahiphop",
"link": "/bayareahiphop",
"taxonomy": "site"
},
"campaign21": {
"name": "Campaign 21",
"type": "terms",
"id": "campaign21",
"slug": "campaign21",
"link": "/campaign21",
"taxonomy": "site"
},
"checkplease": {
"name": "KQED food",
"grouping": [
"food",
"bayareabites",
"checkplease"
],
"parent": "food",
"type": "terms",
"id": "checkplease",
"slug": "checkplease",
"link": "/food",
"taxonomy": "site"
},
"education": {
"name": "Education",
"grouping": [
"education"
],
"type": "terms",
"id": "education",
"slug": "education",
"link": "/education",
"taxonomy": "site"
},
"elections": {
"name": "Elections",
"type": "terms",
"id": "elections",
"slug": "elections",
"link": "/elections",
"taxonomy": "site"
},
"events": {
"name": "Events",
"type": "terms",
"id": "events",
"slug": "events",
"link": "/events",
"taxonomy": "site"
},
"event": {
"name": "Event",
"alias": "events",
"type": "terms",
"id": "event",
"slug": "event",
"link": "/event",
"taxonomy": "site"
},
"filmschoolshorts": {
"name": "Film School Shorts",
"type": "terms",
"id": "filmschoolshorts",
"slug": "filmschoolshorts",
"link": "/filmschoolshorts",
"taxonomy": "site"
},
"food": {
"name": "KQED food",
"grouping": [
"food",
"bayareabites",
"checkplease"
],
"type": "terms",
"id": "food",
"slug": "food",
"link": "/food",
"taxonomy": "site"
},
"forum": {
"name": "Forum",
"relatedContentQuery": "posts/forum?",
"parent": "news",
"type": "terms",
"id": "forum",
"slug": "forum",
"link": "/forum",
"taxonomy": "site"
},
"futureofyou": {
"name": "Future of You",
"grouping": [
"science",
"futureofyou"
],
"parent": "science",
"type": "terms",
"id": "futureofyou",
"slug": "futureofyou",
"link": "/futureofyou",
"taxonomy": "site"
},
"jpepinheart": {
"name": "KQED food",
"relatedContentQuery": "posts/food,bayareabites,checkplease",
"parent": "food",
"type": "terms",
"id": "jpepinheart",
"slug": "jpepinheart",
"link": "/food",
"taxonomy": "site"
},
"liveblog": {
"name": "Live Blog",
"type": "terms",
"id": "liveblog",
"slug": "liveblog",
"link": "/liveblog",
"taxonomy": "site"
},
"livetv": {
"name": "Live TV",
"parent": "tv",
"type": "terms",
"id": "livetv",
"slug": "livetv",
"link": "/livetv",
"taxonomy": "site"
},
"lowdown": {
"name": "The Lowdown",
"relatedContentQuery": "posts/lowdown?",
"parent": "news",
"type": "terms",
"id": "lowdown",
"slug": "lowdown",
"link": "/lowdown",
"taxonomy": "site"
},
"mindshift": {
"name": "Mindshift",
"parent": "news",
"description": "MindShift explores the future of education by highlighting the innovative – and sometimes counterintuitive – ways educators and parents are helping all children succeed.",
"type": "terms",
"id": "mindshift",
"slug": "mindshift",
"link": "/mindshift",
"taxonomy": "site"
},
"news": {
"name": "News",
"grouping": [
"news",
"forum"
],
"type": "terms",
"id": "news",
"slug": "news",
"link": "/news",
"taxonomy": "site"
},
"perspectives": {
"name": "Perspectives",
"parent": "radio",
"type": "terms",
"id": "perspectives",
"slug": "perspectives",
"link": "/perspectives",
"taxonomy": "site"
},
"podcasts": {
"name": "Podcasts",
"type": "terms",
"id": "podcasts",
"slug": "podcasts",
"link": "/podcasts",
"taxonomy": "site"
},
"pop": {
"name": "Pop",
"parent": "arts",
"type": "terms",
"id": "pop",
"slug": "pop",
"link": "/pop",
"taxonomy": "site"
},
"pressroom": {
"name": "Pressroom",
"type": "terms",
"id": "pressroom",
"slug": "pressroom",
"link": "/pressroom",
"taxonomy": "site"
},
"quest": {
"name": "Quest",
"parent": "science",
"type": "terms",
"id": "quest",
"slug": "quest",
"link": "/quest",
"taxonomy": "site"
},
"radio": {
"name": "Radio",
"grouping": [
"forum",
"perspectives"
],
"description": "Listen to KQED Public Radio – home of Forum and The California Report – on 88.5 FM in San Francisco, 89.3 FM in Sacramento, 88.3 FM in Santa Rosa and 88.1 FM in Martinez.",
"type": "terms",
"id": "radio",
"slug": "radio",
"link": "/radio",
"taxonomy": "site"
},
"root": {
"name": "KQED",
"image": "https://ww2.kqed.org/app/uploads/2020/02/KQED-OG-Image@1x.png",
"imageWidth": 1200,
"imageHeight": 630,
"headData": {
"title": "KQED | News, Radio, Podcasts, TV | Public Media for Northern California",
"description": "KQED provides public radio, television, and independent reporting on issues that matter to the Bay Area. We’re the NPR and PBS member station for Northern California."
},
"type": "terms",
"id": "root",
"slug": "root",
"link": "/root",
"taxonomy": "site"
},
"science": {
"name": "Science",
"grouping": [
"science",
"futureofyou"
],
"description": "KQED Science brings you award-winning science and environment coverage from the Bay Area and beyond.",
"type": "terms",
"id": "science",
"slug": "science",
"link": "/science",
"taxonomy": "site"
},
"stateofhealth": {
"name": "State of Health",
"parent": "science",
"type": "terms",
"id": "stateofhealth",
"slug": "stateofhealth",
"link": "/stateofhealth",
"taxonomy": "site"
},
"support": {
"name": "Support",
"type": "terms",
"id": "support",
"slug": "support",
"link": "/support",
"taxonomy": "site"
},
"thedolist": {
"name": "The Do List",
"parent": "arts",
"type": "terms",
"id": "thedolist",
"slug": "thedolist",
"link": "/thedolist",
"taxonomy": "site"
},
"trulyca": {
"name": "Truly CA",
"grouping": [
"arts",
"pop",
"trulyca"
],
"parent": "arts",
"type": "terms",
"id": "trulyca",
"slug": "trulyca",
"link": "/trulyca",
"taxonomy": "site"
},
"tv": {
"name": "TV",
"type": "terms",
"id": "tv",
"slug": "tv",
"link": "/tv",
"taxonomy": "site"
},
"voterguide": {
"name": "Voter Guide",
"parent": "elections",
"alias": "elections",
"type": "terms",
"id": "voterguide",
"slug": "voterguide",
"link": "/voterguide",
"taxonomy": "site"
},
"guiaelectoral": {
"name": "Guia Electoral",
"parent": "elections",
"alias": "elections",
"type": "terms",
"id": "guiaelectoral",
"slug": "guiaelectoral",
"link": "/guiaelectoral",
"taxonomy": "site"
},
"news_34755": {
"type": "terms",
"id": "news_34755",
"meta": {
"index": "terms_1716263798",
"site": "news",
"id": "34755",
"found": true
},
"relationships": {},
"name": "artificial intelligence",
"slug": "artificial-intelligence",
"taxonomy": "tag",
"description": null,
"featImg": null,
"headData": {
"title": "artificial intelligence | KQED News",
"description": null,
"ogTitle": null,
"ogDescription": null,
"ogImgId": null,
"twTitle": null,
"twDescription": null,
"twImgId": null,
"imageData": {
"ogImageSize": {
"file": "https://cdn.kqed.org/wp-content/uploads/2020/02/KQED-OG-Image@1x.png",
"width": 1200,
"height": 630
},
"twImageSize": {
"file": "https://cdn.kqed.org/wp-content/uploads/2020/02/KQED-OG-Image@1x.png"
},
"twitterCard": "summary_large_image"
}
},
"ttid": 34772,
"isLoading": false,
"link": "/news/tag/artificial-intelligence"
},
"source_news_12065748": {
"type": "terms",
"id": "source_news_12065748",
"meta": {
"override": true
},
"name": "Political Breakdown",
"isLoading": false
},
"source_news_12063587": {
"type": "terms",
"id": "source_news_12063587",
"meta": {
"override": true
},
"name": "The California Report",
"link": "https://www.kqed.org/news/tag/tcrarchive/",
"isLoading": false
},
"source_news_12059911": {
"type": "terms",
"id": "source_news_12059911",
"meta": {
"override": true
},
"name": "Close All Tabs",
"link": "https://www.kqed.org/podcasts/closealltabs",
"isLoading": false
},
"source_news_12054417": {
"type": "terms",
"id": "source_news_12054417",
"meta": {
"override": true
},
"name": "The Bay",
"link": "https://www.kqed.org/podcasts/thebay",
"isLoading": false
},
"news_33544": {
"type": "terms",
"id": "news_33544",
"meta": {
"index": "terms_1716263798",
"site": "news",
"id": "33544",
"found": true
},
"relationships": {},
"featImg": null,
"name": "Political Breakdown",
"description": null,
"taxonomy": "program",
"headData": {
"twImgId": null,
"twTitle": null,
"ogTitle": null,
"ogImgId": null,
"twDescription": null,
"description": null,
"title": "Political Breakdown Archives | KQED News",
"ogDescription": null
},
"ttid": 33561,
"slug": "political-breakdown",
"isLoading": false,
"link": "/news/program/political-breakdown"
},
"news_8": {
"type": "terms",
"id": "news_8",
"meta": {
"index": "terms_1716263798",
"site": "news",
"id": "8",
"found": true
},
"relationships": {},
"featImg": null,
"name": "News",
"description": null,
"taxonomy": "category",
"headData": {
"twImgId": null,
"twTitle": null,
"ogTitle": null,
"ogImgId": null,
"twDescription": null,
"description": null,
"title": "News Archives | KQED News",
"ogDescription": null
},
"ttid": 8,
"slug": "news",
"isLoading": false,
"link": "/news/category/news"
},
"news_13": {
"type": "terms",
"id": "news_13",
"meta": {
"index": "terms_1716263798",
"site": "news",
"id": "13",
"found": true
},
"relationships": {},
"name": "Politics",
"slug": "politics",
"taxonomy": "category",
"description": null,
"featImg": null,
"headData": {
"title": "Politics | KQED News",
"description": null,
"ogTitle": null,
"ogDescription": null,
"ogImgId": null,
"twTitle": null,
"twDescription": null,
"twImgId": null
},
"ttid": 13,
"isLoading": false,
"link": "/news/category/politics"
},
"news_25184": {
"type": "terms",
"id": "news_25184",
"meta": {
"index": "terms_1716263798",
"site": "news",
"id": "25184",
"found": true
},
"relationships": {},
"featImg": null,
"name": "AI",
"description": null,
"taxonomy": "tag",
"headData": {
"twImgId": null,
"twTitle": null,
"ogTitle": null,
"ogImgId": null,
"twDescription": null,
"description": null,
"title": "AI Archives | KQED News",
"ogDescription": null
},
"ttid": 25201,
"slug": "ai",
"isLoading": false,
"link": "/news/tag/ai"
},
"news_22757": {
"type": "terms",
"id": "news_22757",
"meta": {
"index": "terms_1716263798",
"site": "news",
"id": "22757",
"found": true
},
"relationships": {},
"featImg": null,
"name": "cryptocurrency",
"description": null,
"taxonomy": "tag",
"headData": {
"twImgId": null,
"twTitle": null,
"ogTitle": null,
"ogImgId": null,
"twDescription": null,
"description": null,
"title": "cryptocurrency Archives | KQED News",
"ogDescription": null
},
"ttid": 22774,
"slug": "cryptocurrency",
"isLoading": false,
"link": "/news/tag/cryptocurrency"
},
"news_36169": {
"type": "terms",
"id": "news_36169",
"meta": {
"index": "terms_1716263798",
"site": "news",
"id": "36169",
"found": true
},
"relationships": {},
"name": "david sacks",
"slug": "david-sacks",
"taxonomy": "tag",
"description": null,
"featImg": null,
"headData": {
"title": "david sacks | KQED News",
"description": null,
"ogTitle": null,
"ogDescription": null,
"ogImgId": null,
"twTitle": null,
"twDescription": null,
"twImgId": null
},
"ttid": 36186,
"isLoading": false,
"link": "/news/tag/david-sacks"
},
"news_34377": {
"type": "terms",
"id": "news_34377",
"meta": {
"index": "terms_1716263798",
"site": "news",
"id": "34377",
"found": true
},
"relationships": {},
"name": "featured-politics",
"slug": "featured-politics",
"taxonomy": "tag",
"description": null,
"featImg": null,
"headData": {
"title": "featured-politics Archives | KQED News",
"description": null,
"ogTitle": null,
"ogDescription": null,
"ogImgId": null,
"twTitle": null,
"twDescription": null,
"twImgId": null
},
"ttid": 34394,
"isLoading": false,
"link": "/news/tag/featured-politics"
},
"news_22235": {
"type": "terms",
"id": "news_22235",
"meta": {
"index": "terms_1716263798",
"site": "news",
"id": "22235",
"found": true
},
"relationships": {},
"featImg": null,
"name": "Political Breakdown",
"description": "\u003cimg class=\"alignnone size-medium wp-image-11638190\" src=\"https://ww2.kqed.org/news/wp-content/uploads/sites/10/2018/01/PB-for-FB-links.png\" alt=\"\" />\r\n\r\nJoin hosts\u003cstrong> Scott Shafer\u003c/strong> and \u003cstrong>Marisa Lagos\u003c/strong> as they unpack the week in politics with a California perspective. Featuring interviews with reporters and other insiders involved in the craft of politics—including elected officials, candidates, pollsters, campaign managers, fundraisers, and other political players—\u003ci>Political Breakdown \u003c/i>pulls back the curtain to offer an insider’s glimpse at how politics works today.\r\n\r\n\u003ca href=\"https://itunes.apple.com/us/podcast/political-breakdown/id1327641087?mt=2\">\u003cimg src=\"https://ww2.kqed.org/news/wp-content/uploads/sites/10/2018/01/Listen_on_Apple_Podcasts_sRGB_US-e1515635079510.png\" />\u003c/a>",
"taxonomy": "tag",
"headData": {
"twImgId": null,
"twTitle": null,
"ogTitle": null,
"ogImgId": null,
"twDescription": null,
"description": "Join hosts Scott Shafer and Marisa Lagos as they unpack the week in politics with a California perspective. Featuring interviews with reporters and other insiders involved in the craft of politics—including elected officials, candidates, pollsters, campaign managers, fundraisers, and other political players—Political Breakdown pulls back the curtain to offer an insider’s glimpse at how politics works today.",
"title": "Political Breakdown Archives | KQED News",
"ogDescription": null
},
"ttid": 22252,
"slug": "political-breakdown",
"isLoading": false,
"link": "/news/tag/political-breakdown"
},
"news_17968": {
"type": "terms",
"id": "news_17968",
"meta": {
"index": "terms_1716263798",
"site": "news",
"id": "17968",
"found": true
},
"relationships": {},
"name": "Politics",
"slug": "politics",
"taxonomy": "tag",
"description": null,
"featImg": null,
"headData": {
"title": "Politics | KQED News",
"description": null,
"ogTitle": null,
"ogDescription": null,
"ogImgId": null,
"twTitle": null,
"twDescription": null,
"twImgId": null
},
"ttid": 18002,
"isLoading": false,
"link": "/news/tag/politics"
},
"news_33734": {
"type": "terms",
"id": "news_33734",
"meta": {
"index": "terms_1716263798",
"site": "news",
"id": "33734",
"found": true
},
"relationships": {},
"featImg": null,
"name": "Local Politics",
"description": null,
"taxonomy": "interest",
"headData": {
"twImgId": null,
"twTitle": null,
"ogTitle": null,
"ogImgId": null,
"twDescription": null,
"description": null,
"title": "Local Politics Archives | KQED News",
"ogDescription": null
},
"ttid": 33751,
"slug": "local-politics",
"isLoading": false,
"link": "/news/interest/local-politics"
},
"news_72": {
"type": "terms",
"id": "news_72",
"meta": {
"index": "terms_1716263798",
"site": "news",
"id": "72",
"found": true
},
"relationships": {},
"featImg": "https://ww2.kqed.org/app/uploads/sites/10/2014/10/TCR-2-Logo-Web-Banners-03.png",
"name": "The California Report",
"description": null,
"taxonomy": "program",
"headData": {
"twImgId": null,
"twTitle": null,
"ogTitle": null,
"ogImgId": null,
"twDescription": null,
"description": null,
"title": "The California Report Archives | KQED News",
"ogDescription": null
},
"ttid": 6969,
"slug": "the-california-report",
"isLoading": false,
"link": "/news/program/the-california-report"
},
"news_33520": {
"type": "terms",
"id": "news_33520",
"meta": {
"index": "terms_1716263798",
"site": "news",
"id": "33520",
"found": true
},
"relationships": {},
"featImg": null,
"name": "Podcast",
"description": null,
"taxonomy": "category",
"headData": {
"twImgId": null,
"twTitle": null,
"ogTitle": null,
"ogImgId": null,
"twDescription": null,
"description": null,
"title": "Podcast Archives | KQED News",
"ogDescription": null
},
"ttid": 33537,
"slug": "podcast",
"isLoading": false,
"link": "/news/category/podcast"
},
"news_34018": {
"type": "terms",
"id": "news_34018",
"meta": {
"index": "terms_1716263798",
"site": "news",
"id": "34018",
"found": true
},
"relationships": {},
"featImg": null,
"name": "tcr",
"description": null,
"taxonomy": "category",
"headData": {
"twImgId": null,
"twTitle": null,
"ogTitle": null,
"ogImgId": null,
"twDescription": null,
"description": null,
"title": "tcr Archives | KQED News",
"ogDescription": null
},
"ttid": 34035,
"slug": "tcr",
"isLoading": false,
"link": "/news/category/tcr"
},
"news_36087": {
"type": "terms",
"id": "news_36087",
"meta": {
"index": "terms_1716263798",
"site": "news",
"id": "36087",
"found": true
},
"relationships": {},
"name": "data centers",
"slug": "data-centers",
"taxonomy": "tag",
"description": null,
"featImg": null,
"headData": {
"title": "data centers | KQED News",
"description": null,
"ogTitle": null,
"ogDescription": null,
"ogImgId": null,
"twTitle": null,
"twDescription": null,
"twImgId": null
},
"ttid": 36104,
"isLoading": false,
"link": "/news/tag/data-centers"
},
"news_36091": {
"type": "terms",
"id": "news_36091",
"meta": {
"index": "terms_1716263798",
"site": "news",
"id": "36091",
"found": true
},
"relationships": {},
"name": "decommissioned",
"slug": "decommissioned",
"taxonomy": "tag",
"description": null,
"featImg": null,
"headData": {
"title": "decommissioned | KQED News",
"description": null,
"ogTitle": null,
"ogDescription": null,
"ogImgId": null,
"twTitle": null,
"twDescription": null,
"twImgId": null
},
"ttid": 36108,
"isLoading": false,
"link": "/news/tag/decommissioned"
},
"news_36089": {
"type": "terms",
"id": "news_36089",
"meta": {
"index": "terms_1716263798",
"site": "news",
"id": "36089",
"found": true
},
"relationships": {},
"name": "environmentalists",
"slug": "environmentalists",
"taxonomy": "tag",
"description": null,
"featImg": null,
"headData": {
"title": "environmentalists | KQED News",
"description": null,
"ogTitle": null,
"ogDescription": null,
"ogImgId": null,
"twTitle": null,
"twDescription": null,
"twImgId": null
},
"ttid": 36106,
"isLoading": false,
"link": "/news/tag/environmentalists"
},
"news_36088": {
"type": "terms",
"id": "news_36088",
"meta": {
"index": "terms_1716263798",
"site": "news",
"id": "36088",
"found": true
},
"relationships": {},
"name": "natural resources",
"slug": "natural-resources",
"taxonomy": "tag",
"description": null,
"featImg": null,
"headData": {
"title": "natural resources | KQED News",
"description": null,
"ogTitle": null,
"ogDescription": null,
"ogImgId": null,
"twTitle": null,
"twDescription": null,
"twImgId": null
},
"ttid": 36105,
"isLoading": false,
"link": "/news/tag/natural-resources"
},
"news_36090": {
"type": "terms",
"id": "news_36090",
"meta": {
"index": "terms_1716263798",
"site": "news",
"id": "36090",
"found": true
},
"relationships": {},
"name": "offshore drilling",
"slug": "offshore-drilling",
"taxonomy": "tag",
"description": null,
"featImg": null,
"headData": {
"title": "offshore drilling | KQED News",
"description": null,
"ogTitle": null,
"ogDescription": null,
"ogImgId": null,
"twTitle": null,
"twDescription": null,
"twImgId": null
},
"ttid": 36107,
"isLoading": false,
"link": "/news/tag/offshore-drilling"
},
"news_21998": {
"type": "terms",
"id": "news_21998",
"meta": {
"index": "terms_1716263798",
"site": "news",
"id": "21998",
"found": true
},
"relationships": {},
"featImg": null,
"name": "TCRAM",
"description": null,
"taxonomy": "tag",
"headData": {
"twImgId": null,
"twTitle": null,
"ogTitle": null,
"ogImgId": null,
"twDescription": null,
"description": null,
"title": "TCRAM Archives | KQED News",
"ogDescription": null
},
"ttid": 22015,
"slug": "tcram",
"isLoading": false,
"link": "/news/tag/tcram"
},
"news_21268": {
"type": "terms",
"id": "news_21268",
"meta": {
"index": "terms_1716263798",
"site": "news",
"id": "21268",
"found": true
},
"relationships": {},
"featImg": null,
"name": "tcrarchive",
"description": null,
"taxonomy": "tag",
"headData": {
"twImgId": null,
"twTitle": null,
"ogTitle": null,
"ogImgId": null,
"twDescription": null,
"description": null,
"title": "tcrarchive Archives | KQED News",
"ogDescription": null
},
"ttid": 21285,
"slug": "tcrarchive",
"isLoading": false,
"link": "/news/tag/tcrarchive"
},
"news_31795": {
"type": "terms",
"id": "news_31795",
"meta": {
"index": "terms_1716263798",
"site": "news",
"id": "31795",
"found": true
},
"relationships": {},
"featImg": null,
"name": "California",
"description": null,
"taxonomy": "category",
"headData": {
"twImgId": null,
"twTitle": null,
"ogTitle": null,
"ogImgId": null,
"twDescription": null,
"description": null,
"title": "California Archives | KQED News",
"ogDescription": null
},
"ttid": 31812,
"slug": "california",
"isLoading": false,
"link": "/news/category/california"
},
"news_34167": {
"type": "terms",
"id": "news_34167",
"meta": {
"index": "terms_1716263798",
"site": "news",
"id": "34167",
"found": true
},
"relationships": {},
"name": "Criminal Justice",
"slug": "criminal-justice",
"taxonomy": "category",
"description": null,
"featImg": null,
"headData": {
"title": "Criminal Justice Archives | KQED News",
"description": null,
"ogTitle": null,
"ogDescription": null,
"ogImgId": null,
"twTitle": null,
"twDescription": null,
"twImgId": null
},
"ttid": 34184,
"isLoading": false,
"link": "/news/category/criminal-justice"
},
"news_248": {
"type": "terms",
"id": "news_248",
"meta": {
"index": "terms_1716263798",
"site": "news",
"id": "248",
"found": true
},
"relationships": {},
"featImg": null,
"name": "Technology",
"description": null,
"taxonomy": "category",
"headData": {
"twImgId": null,
"twTitle": null,
"ogTitle": null,
"ogImgId": null,
"twDescription": null,
"description": null,
"title": "Technology Archives | KQED News",
"ogDescription": null
},
"ttid": 256,
"slug": "technology",
"isLoading": false,
"link": "/news/category/technology"
},
"news_32664": {
"type": "terms",
"id": "news_32664",
"meta": {
"index": "terms_1716263798",
"site": "news",
"id": "32664",
"found": true
},
"relationships": {},
"name": "AI software",
"slug": "ai-software",
"taxonomy": "tag",
"description": null,
"featImg": null,
"headData": {
"title": "AI software | KQED News",
"description": null,
"ogTitle": null,
"ogDescription": null,
"ogImgId": null,
"twTitle": null,
"twDescription": null,
"twImgId": null
},
"ttid": 32681,
"isLoading": false,
"link": "/news/tag/ai-software"
},
"news_1386": {
"type": "terms",
"id": "news_1386",
"meta": {
"index": "terms_1716263798",
"site": "news",
"id": "1386",
"found": true
},
"relationships": {},
"featImg": null,
"name": "Bay Area",
"description": null,
"taxonomy": "tag",
"headData": {
"twImgId": null,
"twTitle": null,
"ogTitle": null,
"ogImgId": null,
"twDescription": null,
"description": null,
"title": "Bay Area Archives | KQED News",
"ogDescription": null
},
"ttid": 1398,
"slug": "bay-area",
"isLoading": false,
"link": "/news/tag/bay-area"
},
"news_18538": {
"type": "terms",
"id": "news_18538",
"meta": {
"index": "terms_1716263798",
"site": "news",
"id": "18538",
"found": true
},
"relationships": {},
"featImg": null,
"name": "California",
"description": null,
"taxonomy": "tag",
"headData": {
"twImgId": null,
"twTitle": null,
"ogTitle": null,
"ogImgId": null,
"twDescription": null,
"description": null,
"title": "California Archives | KQED News",
"ogDescription": null
},
"ttid": 31,
"slug": "california",
"isLoading": false,
"link": "/news/tag/california"
},
"news_27626": {
"type": "terms",
"id": "news_27626",
"meta": {
"index": "terms_1716263798",
"site": "news",
"id": "27626",
"found": true
},
"relationships": {},
"featImg": null,
"name": "featured-news",
"description": null,
"taxonomy": "tag",
"headData": {
"twImgId": null,
"twTitle": null,
"ogTitle": null,
"ogImgId": null,
"twDescription": null,
"description": null,
"title": "featured-news Archives | KQED News",
"ogDescription": null
},
"ttid": 27643,
"slug": "featured-news",
"isLoading": false,
"link": "/news/tag/featured-news"
},
"news_116": {
"type": "terms",
"id": "news_116",
"meta": {
"index": "terms_1716263798",
"site": "news",
"id": "116",
"found": true
},
"relationships": {},
"featImg": null,
"name": "police",
"description": null,
"taxonomy": "tag",
"headData": {
"twImgId": null,
"twTitle": null,
"ogTitle": null,
"ogImgId": null,
"twDescription": null,
"description": null,
"title": "police Archives | KQED News",
"ogDescription": null
},
"ttid": 120,
"slug": "police",
"isLoading": false,
"link": "/news/tag/police"
},
"news_34586": {
"type": "terms",
"id": "news_34586",
"meta": {
"index": "terms_1716263798",
"site": "news",
"id": "34586",
"found": true
},
"relationships": {},
"name": "Silicon Valley",
"slug": "silicon-valley",
"taxonomy": "tag",
"description": null,
"featImg": null,
"headData": {
"title": "Silicon Valley | KQED News",
"description": null,
"ogTitle": null,
"ogDescription": null,
"ogImgId": null,
"twTitle": null,
"twDescription": null,
"twImgId": null
},
"ttid": 34603,
"isLoading": false,
"link": "/news/tag/silicon-valley"
},
"news_35940": {
"type": "terms",
"id": "news_35940",
"meta": {
"index": "terms_1716263798",
"site": "news",
"id": "35940",
"found": true
},
"relationships": {},
"name": "tech industry",
"slug": "tech-industry",
"taxonomy": "tag",
"description": null,
"featImg": null,
"headData": {
"title": "tech industry | KQED News",
"description": null,
"ogTitle": null,
"ogDescription": null,
"ogImgId": null,
"twTitle": null,
"twDescription": null,
"twImgId": null
},
"ttid": 35957,
"isLoading": false,
"link": "/news/tag/tech-industry"
},
"news_1631": {
"type": "terms",
"id": "news_1631",
"meta": {
"index": "terms_1716263798",
"site": "news",
"id": "1631",
"found": true
},
"relationships": {},
"name": "Technology",
"slug": "technology",
"taxonomy": "tag",
"description": null,
"featImg": null,
"headData": {
"title": "Technology | KQED News",
"description": null,
"ogTitle": null,
"ogDescription": null,
"ogImgId": null,
"twTitle": null,
"twDescription": null,
"twImgId": null
},
"ttid": 1643,
"isLoading": false,
"link": "/news/tag/technology"
},
"news_29969": {
"type": "terms",
"id": "news_29969",
"meta": {
"index": "terms_1716263798",
"site": "news",
"id": "29969",
"found": true
},
"relationships": {},
"featImg": null,
"name": "KVPR",
"description": null,
"taxonomy": "affiliate",
"headData": {
"twImgId": null,
"twTitle": null,
"ogTitle": null,
"ogImgId": null,
"twDescription": null,
"description": null,
"title": "KVPR Archives | KQED Arts",
"ogDescription": null
},
"ttid": 29986,
"slug": "kvpr",
"isLoading": false,
"link": "/news/affiliate/kvpr"
},
"news_33745": {
"type": "terms",
"id": "news_33745",
"meta": {
"index": "terms_1716263798",
"site": "news",
"id": "33745",
"found": true
},
"relationships": {},
"featImg": null,
"name": "Criminal Justice",
"description": null,
"taxonomy": "interest",
"headData": {
"twImgId": null,
"twTitle": null,
"ogTitle": null,
"ogImgId": null,
"twDescription": null,
"description": null,
"title": "Criminal Justice Archives | KQED News",
"ogDescription": null
},
"ttid": 33762,
"slug": "criminal-justice",
"isLoading": false,
"link": "/news/interest/criminal-justice"
},
"news_33733": {
"type": "terms",
"id": "news_33733",
"meta": {
"index": "terms_1716263798",
"site": "news",
"id": "33733",
"found": true
},
"relationships": {},
"featImg": null,
"name": "News",
"description": null,
"taxonomy": "interest",
"headData": {
"twImgId": null,
"twTitle": null,
"ogTitle": null,
"ogImgId": null,
"twDescription": null,
"description": null,
"title": "News Archives | KQED News",
"ogDescription": null
},
"ttid": 33750,
"slug": "news",
"isLoading": false,
"link": "/news/interest/news"
},
"news_33732": {
"type": "terms",
"id": "news_33732",
"meta": {
"index": "terms_1716263798",
"site": "news",
"id": "33732",
"found": true
},
"relationships": {},
"featImg": null,
"name": "Technology",
"description": null,
"taxonomy": "interest",
"headData": {
"twImgId": null,
"twTitle": null,
"ogTitle": null,
"ogImgId": null,
"twDescription": null,
"description": null,
"title": "Technology Archives | KQED News",
"ogDescription": null
},
"ttid": 33749,
"slug": "technology",
"isLoading": false,
"link": "/news/interest/technology"
},
"news_32668": {
"type": "terms",
"id": "news_32668",
"meta": {
"index": "terms_1716263798",
"site": "news",
"id": "32668",
"found": true
},
"relationships": {},
"featImg": null,
"name": "ChatGPT",
"description": null,
"taxonomy": "tag",
"headData": {
"twImgId": null,
"twTitle": null,
"ogTitle": null,
"ogImgId": null,
"twDescription": null,
"description": null,
"title": "ChatGPT Archives | KQED News",
"ogDescription": null
},
"ttid": 32685,
"slug": "chatgpt",
"isLoading": false,
"link": "/news/tag/chatgpt"
},
"news_29886": {
"type": "terms",
"id": "news_29886",
"meta": {
"index": "terms_1716263798",
"site": "news",
"id": "29886",
"found": true
},
"relationships": {},
"featImg": null,
"name": "children's health",
"description": null,
"taxonomy": "tag",
"headData": {
"twImgId": null,
"twTitle": null,
"ogTitle": null,
"ogImgId": null,
"twDescription": null,
"description": null,
"title": "children's health Archives | KQED News",
"ogDescription": null
},
"ttid": 29903,
"slug": "childrens-health",
"isLoading": false,
"link": "/news/tag/childrens-health"
},
"news_2109": {
"type": "terms",
"id": "news_2109",
"meta": {
"index": "terms_1716263798",
"site": "news",
"id": "2109",
"found": true
},
"relationships": {},
"featImg": null,
"name": "mental health",
"description": null,
"taxonomy": "tag",
"headData": {
"twImgId": null,
"twTitle": null,
"ogTitle": null,
"ogImgId": null,
"twDescription": null,
"description": null,
"title": "mental health Archives | KQED News",
"ogDescription": null
},
"ttid": 2124,
"slug": "mental-health",
"isLoading": false,
"link": "/news/tag/mental-health"
},
"news_33542": {
"type": "terms",
"id": "news_33542",
"meta": {
"index": "terms_1716263798",
"site": "news",
"id": "33542",
"found": true
},
"relationships": {},
"featImg": null,
"name": "OpenAI",
"description": null,
"taxonomy": "tag",
"headData": {
"twImgId": null,
"twTitle": null,
"ogTitle": null,
"ogImgId": null,
"twDescription": null,
"description": null,
"title": "OpenAI Archives | KQED News",
"ogDescription": null
},
"ttid": 33559,
"slug": "openai",
"isLoading": false,
"link": "/news/tag/openai"
},
"news_22456": {
"type": "terms",
"id": "news_22456",
"meta": {
"index": "terms_1716263798",
"site": "news",
"id": "22456",
"found": true
},
"relationships": {},
"featImg": null,
"name": "public safety",
"description": null,
"taxonomy": "tag",
"headData": {
"twImgId": null,
"twTitle": null,
"ogTitle": null,
"ogImgId": null,
"twDescription": null,
"description": null,
"title": "public safety Archives | KQED News",
"ogDescription": null
},
"ttid": 22473,
"slug": "public-safety",
"isLoading": false,
"link": "/news/tag/public-safety"
},
"news_33543": {
"type": "terms",
"id": "news_33543",
"meta": {
"index": "terms_1716263798",
"site": "news",
"id": "33543",
"found": true
},
"relationships": {},
"name": "Sam Altman",
"slug": "sam-altman",
"taxonomy": "tag",
"description": null,
"featImg": null,
"headData": {
"title": "Sam Altman | KQED News",
"description": null,
"ogTitle": null,
"ogDescription": null,
"ogImgId": null,
"twTitle": null,
"twDescription": null,
"twImgId": null,
"metaRobotsNoIndex": "noindex"
},
"ttid": 33560,
"isLoading": false,
"link": "/news/tag/sam-altman"
},
"news_38": {
"type": "terms",
"id": "news_38",
"meta": {
"index": "terms_1716263798",
"site": "news",
"id": "38",
"found": true
},
"relationships": {},
"featImg": null,
"name": "San Francisco",
"description": null,
"taxonomy": "tag",
"headData": {
"twImgId": null,
"twTitle": null,
"ogTitle": null,
"ogImgId": null,
"twDescription": null,
"description": null,
"title": "San Francisco Archives | KQED News",
"ogDescription": null
},
"ttid": 58,
"slug": "san-francisco",
"isLoading": false,
"link": "/news/tag/san-francisco"
},
"news_21121": {
"type": "terms",
"id": "news_21121",
"meta": {
"index": "terms_1716263798",
"site": "news",
"id": "21121",
"found": true
},
"relationships": {},
"featImg": null,
"name": "Teenagers",
"description": null,
"taxonomy": "tag",
"headData": {
"twImgId": null,
"twTitle": null,
"ogTitle": null,
"ogImgId": null,
"twDescription": null,
"description": null,
"title": "Teenagers Archives | KQED News",
"ogDescription": null
},
"ttid": 21138,
"slug": "teenagers",
"isLoading": false,
"link": "/news/tag/teenagers"
},
"news_20385": {
"type": "terms",
"id": "news_20385",
"meta": {
"index": "terms_1716263798",
"site": "news",
"id": "20385",
"found": true
},
"relationships": {},
"featImg": null,
"name": "teens",
"description": null,
"taxonomy": "tag",
"headData": {
"twImgId": null,
"twTitle": null,
"ogTitle": null,
"ogImgId": null,
"twDescription": null,
"description": null,
"title": "teens Archives | KQED News",
"ogDescription": null
},
"ttid": 20402,
"slug": "teens",
"isLoading": false,
"link": "/news/tag/teens"
},
"news_33729": {
"type": "terms",
"id": "news_33729",
"meta": {
"index": "terms_1716263798",
"site": "news",
"id": "33729",
"found": true
},
"relationships": {},
"featImg": null,
"name": "San Francisco",
"description": null,
"taxonomy": "interest",
"headData": {
"twImgId": null,
"twTitle": null,
"ogTitle": null,
"ogImgId": null,
"twDescription": null,
"description": null,
"title": "San Francisco Archives | KQED News",
"ogDescription": null
},
"ttid": 33746,
"slug": "san-francisco",
"isLoading": false,
"link": "/news/interest/san-francisco"
},
"news_35082": {
"type": "terms",
"id": "news_35082",
"meta": {
"index": "terms_1716263798",
"site": "news",
"id": "35082",
"found": true
},
"relationships": {},
"name": "Close All Tabs",
"slug": "close-all-tabs",
"taxonomy": "program",
"description": null,
"featImg": null,
"headData": {
"title": "Close All Tabs | KQED News",
"description": null,
"ogTitle": null,
"ogDescription": null,
"ogImgId": null,
"twTitle": null,
"twDescription": null,
"twImgId": null
},
"ttid": 35099,
"isLoading": false,
"link": "/news/program/close-all-tabs"
},
"news_22973": {
"type": "terms",
"id": "news_22973",
"meta": {
"index": "terms_1716263798",
"site": "news",
"id": "22973",
"found": true
},
"relationships": {},
"featImg": null,
"name": "culture",
"description": null,
"taxonomy": "tag",
"headData": {
"twImgId": null,
"twTitle": null,
"ogTitle": null,
"ogImgId": null,
"twDescription": null,
"description": null,
"title": "culture Archives | KQED News",
"ogDescription": null
},
"ttid": 22990,
"slug": "culture",
"isLoading": false,
"link": "/news/tag/culture"
},
"news_3137": {
"type": "terms",
"id": "news_3137",
"meta": {
"index": "terms_1716263798",
"site": "news",
"id": "3137",
"found": true
},
"relationships": {},
"featImg": null,
"name": "internet",
"description": null,
"taxonomy": "tag",
"headData": {
"twImgId": null,
"twTitle": null,
"ogTitle": null,
"ogImgId": null,
"twDescription": null,
"description": null,
"title": "internet Archives | KQED News",
"ogDescription": null
},
"ttid": 3155,
"slug": "internet",
"isLoading": false,
"link": "/news/tag/internet"
},
"news_34646": {
"type": "terms",
"id": "news_34646",
"meta": {
"index": "terms_1716263798",
"site": "news",
"id": "34646",
"found": true
},
"relationships": {},
"name": "internet culture",
"slug": "internet-culture",
"taxonomy": "tag",
"description": null,
"featImg": null,
"headData": {
"title": "internet culture | KQED News",
"description": null,
"ogTitle": null,
"ogDescription": null,
"ogImgId": null,
"twTitle": null,
"twDescription": null,
"twImgId": null
},
"ttid": 34663,
"isLoading": false,
"link": "/news/tag/internet-culture"
},
"news_22307": {
"type": "terms",
"id": "news_22307",
"meta": {
"index": "terms_1716263798",
"site": "news",
"id": "22307",
"found": true
},
"relationships": {},
"featImg": null,
"name": "california laws",
"description": null,
"taxonomy": "tag",
"headData": {
"twImgId": null,
"twTitle": null,
"ogTitle": null,
"ogImgId": null,
"twDescription": null,
"description": null,
"title": "california laws Archives | KQED News",
"ogDescription": null
},
"ttid": 22324,
"slug": "california-laws",
"isLoading": false,
"link": "/news/tag/california-laws"
},
"news_30826": {
"type": "terms",
"id": "news_30826",
"meta": {
"index": "terms_1716263798",
"site": "news",
"id": "30826",
"found": true
},
"relationships": {},
"featImg": null,
"name": "children's mental health",
"description": null,
"taxonomy": "tag",
"headData": {
"twImgId": null,
"twTitle": null,
"ogTitle": null,
"ogImgId": null,
"twDescription": null,
"description": null,
"title": "children's mental health Archives | KQED News",
"ogDescription": null
},
"ttid": 30843,
"slug": "childrens-mental-health",
"isLoading": false,
"link": "/news/tag/childrens-mental-health"
},
"news_16": {
"type": "terms",
"id": "news_16",
"meta": {
"index": "terms_1716263798",
"site": "news",
"id": "16",
"found": true
},
"relationships": {},
"featImg": null,
"name": "Gavin Newsom",
"description": null,
"taxonomy": "tag",
"headData": {
"twImgId": null,
"twTitle": null,
"ogTitle": null,
"ogImgId": null,
"twDescription": null,
"description": null,
"title": "Gavin Newsom Archives | KQED News",
"ogDescription": null
},
"ttid": 16,
"slug": "gavin-newsom",
"isLoading": false,
"link": "/news/tag/gavin-newsom"
},
"news_34532": {
"type": "terms",
"id": "news_34532",
"meta": {
"index": "terms_1716263798",
"site": "news",
"id": "34532",
"found": true
},
"relationships": {},
"name": "new bills",
"slug": "new-bills",
"taxonomy": "tag",
"description": null,
"featImg": null,
"headData": {
"title": "new bills | KQED News",
"description": null,
"ogTitle": null,
"ogDescription": null,
"ogImgId": null,
"twTitle": null,
"twDescription": null,
"twImgId": null,
"metaRobotsNoIndex": "noindex"
},
"ttid": 34549,
"isLoading": false,
"link": "/news/tag/new-bills"
},
"news_21285": {
"type": "terms",
"id": "news_21285",
"meta": {
"index": "terms_1716263798",
"site": "news",
"id": "21285",
"found": true
},
"relationships": {},
"featImg": null,
"name": "South Bay",
"description": null,
"taxonomy": "tag",
"headData": {
"twImgId": null,
"twTitle": null,
"ogTitle": null,
"ogImgId": null,
"twDescription": null,
"description": null,
"title": "South Bay Archives | KQED News",
"ogDescription": null
},
"ttid": 21302,
"slug": "south-bay",
"isLoading": false,
"link": "/news/tag/south-bay"
},
"news_33731": {
"type": "terms",
"id": "news_33731",
"meta": {
"index": "terms_1716263798",
"site": "news",
"id": "33731",
"found": true
},
"relationships": {},
"featImg": null,
"name": "South Bay",
"description": null,
"taxonomy": "interest",
"headData": {
"twImgId": null,
"twTitle": null,
"ogTitle": null,
"ogImgId": null,
"twDescription": null,
"description": null,
"title": "South Bay Archives | KQED News",
"ogDescription": null
},
"ttid": 33748,
"slug": "south-bay",
"isLoading": false,
"link": "/news/interest/south-bay"
},
"news_28250": {
"type": "terms",
"id": "news_28250",
"meta": {
"index": "terms_1716263798",
"site": "news",
"id": "28250",
"found": true
},
"relationships": {},
"featImg": null,
"name": "Local",
"description": null,
"taxonomy": "category",
"headData": {
"twImgId": null,
"twTitle": null,
"ogTitle": null,
"ogImgId": null,
"twDescription": null,
"description": null,
"title": "Local Archives | KQED News",
"ogDescription": null
},
"ttid": 28267,
"slug": "local",
"isLoading": false,
"link": "/news/category/local"
},
"news_33738": {
"type": "terms",
"id": "news_33738",
"meta": {
"index": "terms_1716263798",
"site": "news",
"id": "33738",
"found": true
},
"relationships": {},
"featImg": null,
"name": "California",
"description": null,
"taxonomy": "interest",
"headData": {
"twImgId": null,
"twTitle": null,
"ogTitle": null,
"ogImgId": null,
"twDescription": null,
"description": null,
"title": "California Archives | KQED News",
"ogDescription": null
},
"ttid": 33755,
"slug": "california",
"isLoading": false,
"link": "/news/interest/california"
},
"news_35758": {
"type": "terms",
"id": "news_35758",
"meta": {
"index": "terms_1716263798",
"site": "news",
"id": "35758",
"found": true
},
"relationships": {},
"name": "Open AI",
"slug": "open-ai",
"taxonomy": "tag",
"description": null,
"featImg": null,
"headData": {
"title": "Open AI | KQED News",
"description": null,
"ogTitle": null,
"ogDescription": null,
"ogImgId": null,
"twTitle": null,
"twDescription": null,
"twImgId": null
},
"ttid": 35775,
"isLoading": false,
"link": "/news/tag/open-ai"
},
"news_1323": {
"type": "terms",
"id": "news_1323",
"meta": {
"index": "terms_1716263798",
"site": "news",
"id": "1323",
"found": true
},
"relationships": {},
"featImg": null,
"name": "Donald Trump",
"description": null,
"taxonomy": "tag",
"headData": {
"twImgId": null,
"twTitle": null,
"ogTitle": null,
"ogImgId": null,
"twDescription": null,
"description": null,
"title": "Donald Trump Archives | KQED News",
"ogDescription": null
},
"ttid": 1335,
"slug": "donald-trump",
"isLoading": false,
"link": "/news/tag/donald-trump"
},
"news_33812": {
"type": "terms",
"id": "news_33812",
"meta": {
"index": "terms_1716263798",
"site": "news",
"id": "33812",
"found": true
},
"relationships": {},
"featImg": null,
"name": "Interests",
"description": null,
"taxonomy": "tag",
"headData": {
"twImgId": null,
"twTitle": null,
"ogTitle": null,
"ogImgId": null,
"twDescription": null,
"description": null,
"title": "Interests Archives | KQED News",
"ogDescription": null
},
"ttid": 33829,
"slug": "interests",
"isLoading": false,
"link": "/news/tag/interests"
},
"news_250": {
"type": "terms",
"id": "news_250",
"meta": {
"index": "terms_1716263798",
"site": "news",
"id": "250",
"found": true
},
"relationships": {},
"featImg": null,
"name": "Mark Zuckerberg",
"description": null,
"taxonomy": "tag",
"headData": {
"twImgId": null,
"twTitle": null,
"ogTitle": null,
"ogImgId": null,
"twDescription": null,
"description": null,
"title": "Mark Zuckerberg Archives | KQED News",
"ogDescription": null
},
"ttid": 258,
"slug": "mark-zuckerberg",
"isLoading": false,
"link": "/news/tag/mark-zuckerberg"
},
"news_80": {
"type": "terms",
"id": "news_80",
"meta": {
"index": "terms_1716263798",
"site": "news",
"id": "80",
"found": true
},
"relationships": {},
"featImg": null,
"name": "Military",
"description": null,
"taxonomy": "tag",
"headData": {
"twImgId": null,
"twTitle": null,
"ogTitle": null,
"ogImgId": null,
"twDescription": null,
"description": null,
"title": "Military Archives | KQED News",
"ogDescription": null
},
"ttid": 81,
"slug": "military",
"isLoading": false,
"link": "/news/tag/military"
},
"news_22598": {
"type": "terms",
"id": "news_22598",
"meta": {
"index": "terms_1716263798",
"site": "news",
"id": "22598",
"found": true
},
"relationships": {},
"featImg": null,
"name": "The Bay",
"description": "\u003cimg class=\"alignnone size-medium wp-image-11638190\" src=\"https://ww2.kqed.org/news/wp-content/uploads/sites/10/2018/02/TheBay_1200x6301.png\" alt=\"\" />\r\n\u003cbr/>\r\n\r\nEvery good story starts local. So that’s where we start. \u003ci>The Bay\u003c/i> is storytelling for daily news. KQED host Devin Katayama talks with reporters to help us make sense of what’s happening in the Bay Area. One story. One conversation. One idea.\r\n\r\n\u003cstrong>Subscribe to The Bay:\u003c/strong>\r\n\r\n\u003ca href=\"https://itunes.apple.com/us/podcast/the-bay/id1350043452?mt=2\">\u003cimg src=\"https://ww2.kqed.org/news/wp-content/uploads/sites/10/2018/01/Listen_on_Apple_Podcasts_sRGB_US-e1515635079510.png\" />\u003c/a>",
"taxonomy": "tag",
"headData": {
"twImgId": null,
"twTitle": null,
"ogTitle": null,
"ogImgId": null,
"twDescription": null,
"description": "Every good story starts local. So that’s where we start. The Bay is storytelling for daily news. KQED host Devin Katayama talks with reporters to help us make sense of what’s happening in the Bay Area. One story. One conversation. One idea. Subscribe to The Bay:",
"title": "The Bay Archives | KQED News",
"ogDescription": null
},
"ttid": 22615,
"slug": "the-bay",
"isLoading": false,
"link": "/news/tag/the-bay"
}
},
"userAgentReducer": {
"userAgent": "Mozilla/5.0 AppleWebKit/537.36 (KHTML, like Gecko; compatible; ClaudeBot/1.0; +claudebot@anthropic.com)",
"isBot": true
},
"userPermissionsReducer": {
"wpLoggedIn": false
},
"localStorageReducer": {},
"browserHistoryReducer": [],
"eventsReducer": {},
"fssReducer": {},
"tvDailyScheduleReducer": {},
"tvWeeklyScheduleReducer": {},
"tvPrimetimeScheduleReducer": {},
"tvMonthlyScheduleReducer": {},
"userAccountReducer": {
"user": {
"email": null,
"emailStatus": "EMAIL_UNVALIDATED",
"loggedStatus": "LOGGED_OUT",
"loggingChecked": false,
"articles": [],
"firstName": null,
"lastName": null,
"phoneNumber": null,
"fetchingMembership": false,
"membershipError": false,
"memberships": [
{
"id": null,
"startDate": null,
"firstName": null,
"lastName": null,
"familyNumber": null,
"memberNumber": null,
"memberSince": null,
"expirationDate": null,
"pfsEligible": false,
"isSustaining": false,
"membershipLevel": "Prospect",
"membershipStatus": "Non Member",
"lastGiftDate": null,
"renewalDate": null
}
]
},
"authModal": {
"isOpen": false,
"view": "LANDING_VIEW"
},
"error": null
},
"youthMediaReducer": {},
"checkPleaseReducer": {
"filterData": {},
"restaurantData": []
},
"location": {
"pathname": "/news/tag/artificial-intelligence",
"previousPathname": "/"
}
}