<?xml version="1.0" encoding="UTF-8"?>
<News hasArchived="false" page="1" pageCount="1" pageSize="10" timestamp="Mon, 20 Apr 2026 14:10:05 -0400" url="https://dev.my.umbc.edu/groups/umbc-ai/posts.xml?tag=risk">
  <NewsItem contentIssues="false" id="152032" important="false" status="posted" url="https://dev.my.umbc.edu/groups/umbc-ai/posts/152032">
    <Title>Talk: Existing Risks of Generative AI and Surveying Public Perceptions</Title>
    <Tagline>online 11-12 EDT Fri. Sept 5 by CWIT Alum Wendy Bickersteth</Tagline>
    <Body>
      <![CDATA[
          <div class="html-content"><span><p><span>UMBC <a href="https://cwit.umbc.edu/" rel="nofollow external" class="bo">CWIT</a> alumna <a href="https://www.linkedin.com/in/wendy-bickersteth-15b374219/" rel="nofollow external" class="bo">Wendy Bickersteth</a> will give an online talk on </span><span>Existing Risks of Generative AI and Surveying Public Perceptions </span><span>on Friday, September 5, 2025, from 11am-12pm EDT. </span><span>She is currently a Societal Computing PhD student at the Carnegie Mellon University </span><a href="https://www.cylab.cmu.edu/" rel="nofollow external" class="bo"><span>CyLab Security and Privacy Institute</span></a><span>.</span></p><p><span>Working with Dr. Lorrie Cranor, she conducts research on usable privacy and security, focusing on privacy labels and the use of AI. Read more about this work in this recent </span><a href="https://arxiv.org/abs/2505.22073" rel="nofollow external" class="bo"><span>paper</span></a><span>.</span></p><p><span>Join the online event via this </span><a href="https://my3.my.umbc.edu/groups/cwitaffiliates/events/145564/join_meeting" rel="nofollow external" class="bo"><span>WebEx link</span></a><span>.</span></p><div><span><br></span></div></span></div>
      ]]>
    </Body>
    <Summary>UMBC CWIT alumna Wendy Bickersteth will give an online talk on Existing Risks of Generative AI and Surveying Public Perceptions on Friday, September 5, 2025, from 11am-12pm EDT. She is currently a...</Summary>
    <TrackingUrl>https://dev.my.umbc.edu/api/v0/pixel/news/152032/guest@my.umbc.edu/589e8ee69a1f8c2ab61da78072e1b7ea/api/pixel</TrackingUrl>
    <Tag>ai</Tag>
    <Tag>privacy</Tag>
    <Tag>risk</Tag>
    <Tag>security</Tag>
    <Group token="umbc-ai">UMBC AI</Group>
    <GroupUrl>https://dev.my.umbc.edu/groups/umbc-ai</GroupUrl>
    <AvatarUrl>https://assets4-dev.my.umbc.edu/system/shared/avatars/groups/000/002/081/cfb27ebe008c2636486089a759ea5c36/xsmall.png?1691095779</AvatarUrl>
    <AvatarUrl size="original">https://assets2-dev.my.umbc.edu/system/shared/avatars/groups/000/002/081/cfb27ebe008c2636486089a759ea5c36/original.png?1691095779</AvatarUrl>
    <AvatarUrl size="xxlarge">https://assets1-dev.my.umbc.edu/system/shared/avatars/groups/000/002/081/cfb27ebe008c2636486089a759ea5c36/xxlarge.png?1691095779</AvatarUrl>
    <AvatarUrl size="xlarge">https://assets1-dev.my.umbc.edu/system/shared/avatars/groups/000/002/081/cfb27ebe008c2636486089a759ea5c36/xlarge.png?1691095779</AvatarUrl>
    <AvatarUrl size="large">https://assets1-dev.my.umbc.edu/system/shared/avatars/groups/000/002/081/cfb27ebe008c2636486089a759ea5c36/large.png?1691095779</AvatarUrl>
    <AvatarUrl size="medium">https://assets3-dev.my.umbc.edu/system/shared/avatars/groups/000/002/081/cfb27ebe008c2636486089a759ea5c36/medium.png?1691095779</AvatarUrl>
    <AvatarUrl size="small">https://assets3-dev.my.umbc.edu/system/shared/avatars/groups/000/002/081/cfb27ebe008c2636486089a759ea5c36/small.png?1691095779</AvatarUrl>
    <AvatarUrl size="xsmall">https://assets4-dev.my.umbc.edu/system/shared/avatars/groups/000/002/081/cfb27ebe008c2636486089a759ea5c36/xsmall.png?1691095779</AvatarUrl>
    <AvatarUrl size="xxsmall">https://assets1-dev.my.umbc.edu/system/shared/avatars/groups/000/002/081/cfb27ebe008c2636486089a759ea5c36/xxsmall.png?1691095779</AvatarUrl>
    <Sponsor>UMBC AI</Sponsor>
    <ThumbnailUrl size="xxlarge">https://assets4-dev.my.umbc.edu/system/shared/thumbnails/news/000/152/032/db5e90221899158f8077ab439daf4106/xxlarge.jpg?1756991147</ThumbnailUrl>
    <ThumbnailUrl size="xlarge">https://assets2-dev.my.umbc.edu/system/shared/thumbnails/news/000/152/032/db5e90221899158f8077ab439daf4106/xlarge.jpg?1756991147</ThumbnailUrl>
    <ThumbnailUrl size="large">https://assets3-dev.my.umbc.edu/system/shared/thumbnails/news/000/152/032/db5e90221899158f8077ab439daf4106/large.jpg?1756991147</ThumbnailUrl>
    <ThumbnailUrl size="medium">https://assets4-dev.my.umbc.edu/system/shared/thumbnails/news/000/152/032/db5e90221899158f8077ab439daf4106/medium.jpg?1756991147</ThumbnailUrl>
    <ThumbnailUrl size="small">https://assets1-dev.my.umbc.edu/system/shared/thumbnails/news/000/152/032/db5e90221899158f8077ab439daf4106/small.jpg?1756991147</ThumbnailUrl>
    <ThumbnailUrl size="xsmall">https://assets1-dev.my.umbc.edu/system/shared/thumbnails/news/000/152/032/db5e90221899158f8077ab439daf4106/xsmall.jpg?1756991147</ThumbnailUrl>
    <ThumbnailUrl size="xxsmall">https://assets4-dev.my.umbc.edu/system/shared/thumbnails/news/000/152/032/db5e90221899158f8077ab439daf4106/xxsmall.jpg?1756991147</ThumbnailUrl>
    <ThumbnailAltText>Wendy Bickersteth giving a talk.</ThumbnailAltText>
    <PawCount>3</PawCount>
    <CommentCount>0</CommentCount>
    <CommentsAllowed>true</CommentsAllowed>
    <PostedAt>Thu, 04 Sep 2025 09:16:48 -0400</PostedAt>
    <EditAt>Thu, 04 Sep 2025 09:22:08 -0400</EditAt>
  </NewsItem>
  <NewsItem contentIssues="false" id="147328" important="false" status="posted" url="https://dev.my.umbc.edu/groups/umbc-ai/posts/147328">
  <Title>Talk: Unveiling Privacy Risks in AI: Data, Models, and Systems</Title>
  <Tagline>11:30-12:30 Friday, February 14 in ITE325b and online</Tagline>
  <Body>
    <![CDATA[
    <div class="html-content"><div><span><a href="https://www.cs.purdue.edu/homes/an93/" rel="nofollow external" class="bo">​Shengwei An</a> will give a talk on </span><strong>Unveiling Privacy Risks in AI: Data, Models, &amp; Systems</strong>, 11:30-12:30 <span>​Friday, February 14 in </span>ITE325b and <a href="https://my3.my.umbc.edu/groups/csee/events/140124/join_meeting" rel="nofollow external" class="bo">online</a><span>​.</span></div><div><br></div>Artificial Intelligence has become deeply integrated into diverse systems, transforming industries and reshaping our daily lives. However, this widespread adoption also introduces critical privacy risks across the training data, AI models, and AI-powered systems. This talk will explore privacy challenges through these three aspects. First, I will introduce the first high-fidelity attack that exposes the privacy vulnerabilities of training data in pre-trained models and commercial AI services. Next, I will present a novel physical impersonating attack that highlights the privacy risks inherent in AI-based authentication systems. Additionally, I will discuss the first data-free framework designed to eliminate trigger-based model watermarks in diffusion models that aim to protect their intellectual property. Finally, I will conclude with a forward-looking perspective on addressing privacy risks in emerging generative AI techniques, such as Large Language Models and Stable Diffusion Models.<div><br><div><div><span><p><a href="https://www.cs.purdue.edu/homes/an93/" rel="nofollow external" class="bo"> Shengwei An</a> is a Ph.D. candidate in the Department of Computer Science at Purdue University, advised by Prof. Xiangyu Zhang. His research focuses on AI security and privacy, with an emphasis on designing state-of-the-art tools to investigate and mitigate privacy vulnerabilities in real-world AI systems. His work has been published in top-tier conferences, including S&amp;P, USENIX Security, NDSS, and AAAI. 
He is the recipient of the Ross Fellowship from Purdue University and the Best Paper Award in the ECCV 2022 AROW Workshop.</p></span></div></div></div>
    <hr><a href="https://ai.umbc.edu/" rel="nofollow external" class="bo"><strong>UMBC Center for AI</strong></a></div>
]]>
  </Body>
  <Summary>Shengwei An will give a talk on Unveiling Privacy Risks in AI: Data, Models, &amp; Systems, 11:30-12:30 Friday, February 14 in ITE325b and online.    Artificial Intelligence has become deeply...</Summary>
  <TrackingUrl>https://dev.my.umbc.edu/api/v0/pixel/news/147328/guest@my.umbc.edu/230c5403cb93f1dda1c511207766e387/api/pixel</TrackingUrl>
  <Tag>ai</Tag>
  <Tag>privacy</Tag>
  <Tag>risk</Tag>
  <Group token="umbc-ai">UMBC AI</Group>
  <GroupUrl>https://dev.my.umbc.edu/groups/umbc-ai</GroupUrl>
  <AvatarUrl>https://assets4-dev.my.umbc.edu/system/shared/avatars/groups/000/002/081/cfb27ebe008c2636486089a759ea5c36/xsmall.png?1691095779</AvatarUrl>
  <AvatarUrl size="original">https://assets2-dev.my.umbc.edu/system/shared/avatars/groups/000/002/081/cfb27ebe008c2636486089a759ea5c36/original.png?1691095779</AvatarUrl>
  <AvatarUrl size="xxlarge">https://assets1-dev.my.umbc.edu/system/shared/avatars/groups/000/002/081/cfb27ebe008c2636486089a759ea5c36/xxlarge.png?1691095779</AvatarUrl>
  <AvatarUrl size="xlarge">https://assets1-dev.my.umbc.edu/system/shared/avatars/groups/000/002/081/cfb27ebe008c2636486089a759ea5c36/xlarge.png?1691095779</AvatarUrl>
  <AvatarUrl size="large">https://assets1-dev.my.umbc.edu/system/shared/avatars/groups/000/002/081/cfb27ebe008c2636486089a759ea5c36/large.png?1691095779</AvatarUrl>
  <AvatarUrl size="medium">https://assets3-dev.my.umbc.edu/system/shared/avatars/groups/000/002/081/cfb27ebe008c2636486089a759ea5c36/medium.png?1691095779</AvatarUrl>
  <AvatarUrl size="small">https://assets3-dev.my.umbc.edu/system/shared/avatars/groups/000/002/081/cfb27ebe008c2636486089a759ea5c36/small.png?1691095779</AvatarUrl>
  <AvatarUrl size="xsmall">https://assets4-dev.my.umbc.edu/system/shared/avatars/groups/000/002/081/cfb27ebe008c2636486089a759ea5c36/xsmall.png?1691095779</AvatarUrl>
  <AvatarUrl size="xxsmall">https://assets1-dev.my.umbc.edu/system/shared/avatars/groups/000/002/081/cfb27ebe008c2636486089a759ea5c36/xxsmall.png?1691095779</AvatarUrl>
  <Sponsor>UMBC AI</Sponsor>
  <PawCount>0</PawCount>
  <CommentCount>0</CommentCount>
  <CommentsAllowed>true</CommentsAllowed>
  <PostedAt>Thu, 13 Feb 2025 17:35:09 -0500</PostedAt>
  <EditAt>Fri, 14 Feb 2025 09:41:00 -0500</EditAt>
</NewsItem>
  <NewsItem contentIssues="true" id="144922" important="false" status="posted" url="https://dev.my.umbc.edu/groups/umbc-ai/posts/144922">
    <Title>Talk: Responsible AI and AI Risk Management Frameworks, 12pm 10/24</Title>
    <Tagline>AI Lunchbox, 12-1 pm EDT on October 24, 2024</Tagline>
    <Body>
      <![CDATA[
          <div class="html-content"><div><img src="https://ai.umbc.edu/wp-content/uploads/sites/734/2024/10/600_523938704.jpg" style="max-width: 100%; height: auto;"></div><div><br></div><div><span>In the next <a href="https://www.umbctraining.com/" rel="nofollow external" class="bo"><strong>UMBC Training Centers</strong></a> </span><span>AI Lunchbox session,</span><span> <a href="https://www.linkedin.com/in/ed-melick-7a683/" rel="nofollow external" class="bo"><strong>Ed Melick</strong></a> will discuss Responsible AI and AI Risk Management Frameworks, 12-1 pm EDT on October 24, 2024. </span></div><div><span><br></span></div><div><span>Many Responsible AI (RAI) and AI risk management frameworks have been developed, and the number is growing rapidly. Variations among these frameworks can be significant. This has led to some confusion and hesitance among organizations wanting to implement RAI programs to ensure the safe and ethical development, deployment, and use of AI systems. </span></div><div><span><br></span></div><div><span>Join Ed as he discusses C4AI’s effort to build a definitive database of publicly available frameworks, risk repositories, incident databases, and related resources. </span></div><div><span><br></span></div><div><span><a href="https://www.meetup.com/c4a-ai/events/303883918/" rel="nofollow external" class="bo"><strong>Register here</strong></a> for this free online meeting to get the link.</span></div><div><span><br></span></div> <hr><a href="https://ai.umbc.edu/" rel="nofollow external" class="bo"><strong>UMBC Center for AI</strong></a></div>
      ]]>
    </Body>
    <Summary>In the next UMBC Training Centers AI Lunchbox session, Ed Melick will discuss Responsible AI and AI Risk Management Frameworks, 12-1 pm EDT on October 24, 2024.      Many Responsible AI (RAI) and...</Summary>
    <Website>https://www.meetup.com/c4a-ai/events/303883918/</Website>
    <TrackingUrl>https://dev.my.umbc.edu/api/v0/pixel/news/144922/guest@my.umbc.edu/3129f7419313b46cd0a66dd16ee27243/api/pixel</TrackingUrl>
    <Tag>ai</Tag>
    <Tag>risk</Tag>
    <Group token="umbc-ai">UMBC AI</Group>
    <GroupUrl>https://dev.my.umbc.edu/groups/umbc-ai</GroupUrl>
    <AvatarUrl>https://assets4-dev.my.umbc.edu/system/shared/avatars/groups/000/002/081/cfb27ebe008c2636486089a759ea5c36/xsmall.png?1691095779</AvatarUrl>
    <AvatarUrl size="original">https://assets2-dev.my.umbc.edu/system/shared/avatars/groups/000/002/081/cfb27ebe008c2636486089a759ea5c36/original.png?1691095779</AvatarUrl>
    <AvatarUrl size="xxlarge">https://assets1-dev.my.umbc.edu/system/shared/avatars/groups/000/002/081/cfb27ebe008c2636486089a759ea5c36/xxlarge.png?1691095779</AvatarUrl>
    <AvatarUrl size="xlarge">https://assets1-dev.my.umbc.edu/system/shared/avatars/groups/000/002/081/cfb27ebe008c2636486089a759ea5c36/xlarge.png?1691095779</AvatarUrl>
    <AvatarUrl size="large">https://assets1-dev.my.umbc.edu/system/shared/avatars/groups/000/002/081/cfb27ebe008c2636486089a759ea5c36/large.png?1691095779</AvatarUrl>
    <AvatarUrl size="medium">https://assets3-dev.my.umbc.edu/system/shared/avatars/groups/000/002/081/cfb27ebe008c2636486089a759ea5c36/medium.png?1691095779</AvatarUrl>
    <AvatarUrl size="small">https://assets3-dev.my.umbc.edu/system/shared/avatars/groups/000/002/081/cfb27ebe008c2636486089a759ea5c36/small.png?1691095779</AvatarUrl>
    <AvatarUrl size="xsmall">https://assets4-dev.my.umbc.edu/system/shared/avatars/groups/000/002/081/cfb27ebe008c2636486089a759ea5c36/xsmall.png?1691095779</AvatarUrl>
    <AvatarUrl size="xxsmall">https://assets1-dev.my.umbc.edu/system/shared/avatars/groups/000/002/081/cfb27ebe008c2636486089a759ea5c36/xxsmall.png?1691095779</AvatarUrl>
    <Sponsor>UMBC Training Centers</Sponsor>
    <PawCount>0</PawCount>
    <CommentCount>0</CommentCount>
    <CommentsAllowed>true</CommentsAllowed>
    <PostedAt>Sat, 19 Oct 2024 18:58:25 -0400</PostedAt>
    <EditAt>Sat, 19 Oct 2024 19:00:09 -0400</EditAt>
  </NewsItem>
</News>
