<?xml version="1.0" encoding="UTF-8"?>
<urlset xmlns="http://www.sitemaps.org/schemas/sitemap/0.9"
        xmlns:news="http://www.google.com/schemas/sitemap-news/0.9">
  <url>
    <loc>https://24-ai.news/hr/news/2026-05-05/anthropic-enterprise-services-blackstone-goldman/</loc>
    <news:news>
      <news:publication>
        <news:name>24 AI</news:name>
        <news:language>hr</news:language>
      </news:publication>
      <news:publication_date>2026-05-05T00:00:00Z</news:publication_date>
      <news:title>Anthropic s Blackstoneom, Hellman &amp; Friedmanom i Goldman Sachsom osniva enterprise AI uslužnu tvrtku za mid-market</news:title>
    </news:news>
  </url>
  <url>
    <loc>https://24-ai.news/hr/news/2026-05-05/arxiv-agentfloor-small-models-tools/</loc>
    <news:news>
      <news:publication>
        <news:name>24 AI</news:name>
        <news:language>hr</news:language>
      </news:publication>
      <news:publication_date>2026-05-05T00:00:00Z</news:publication_date>
      <news:title>ArXiv AgentFloor: mali open-weight modeli (0,27B-32B) zadovoljavaju kratkoročne agentne zadatke, GPT-5 zadržava prednost samo u dugoročnom planiranju</news:title>
    </news:news>
  </url>
  <url>
    <loc>https://24-ai.news/hr/news/2026-05-05/arxiv-gui-sd-on-policy-self-distillation/</loc>
    <news:news>
      <news:publication>
        <news:name>24 AI</news:name>
        <news:language>hr</news:language>
      </news:publication>
      <news:publication_date>2026-05-05T00:00:00Z</news:publication_date>
      <news:title>ArXiv GUI-SD: prvi on-policy self-distillation framework za GUI grounding nadmašuje GRPO na šest benchmarkova u točnosti i efikasnosti treniranja</news:title>
    </news:news>
  </url>
  <url>
    <loc>https://24-ai.news/hr/news/2026-05-05/arxiv-saga-gpu-scheduling-agents/</loc>
    <news:news>
      <news:publication>
        <news:name>24 AI</news:name>
        <news:language>hr</news:language>
      </news:publication>
      <news:publication_date>2026-05-05T00:00:00Z</news:publication_date>
      <news:title>ArXiv SAGA: workflow-atomic GPU scheduling za AI agente postiže 1,64× brže task completion na 64-GPU klasteru, prihvaćeno na HPDC 2026</news:title>
    </news:news>
  </url>
  <url>
    <loc>https://24-ai.news/hr/news/2026-05-05/arxiv-token-arena-energy-benchmark/</loc>
    <news:news>
      <news:publication>
        <news:name>24 AI</news:name>
        <news:language>hr</news:language>
      </news:publication>
      <news:publication_date>2026-05-05T00:00:00Z</news:publication_date>
      <news:title>ArXiv Token Arena: kontinuirani benchmark koji ujedinjuje energiju i kogniciju, otkriva 6,2× razliku u jouleima po točnom odgovoru između endpointa</news:title>
    </news:news>
  </url>
  <url>
    <loc>https://24-ai.news/hr/news/2026-05-05/arxiv-vlm-visual-jailbreak-icml-2026/</loc>
    <news:news>
      <news:publication>
        <news:name>24 AI</news:name>
        <news:language>hr</news:language>
      </news:publication>
      <news:publication_date>2026-05-05T00:00:00Z</news:publication_date>
      <news:title>ArXiv: Vizualne slike zaobilaze sigurnosne filtre vision-language modela u 40,9 % slučajeva, otkrivaju autori na ICML 2026</news:title>
    </news:news>
  </url>
  <url>
    <loc>https://24-ai.news/hr/news/2026-05-05/aws-agentcore-optimization-preview/</loc>
    <news:news>
      <news:publication>
        <news:name>24 AI</news:name>
        <news:language>hr</news:language>
      </news:publication>
      <news:publication_date>2026-05-05T00:00:00Z</news:publication_date>
      <news:title>AWS Bedrock AgentCore Optimization u previewu: automatizirana petlja od produkcijskih traga do A/B testa s OpenTelemetry trace-ovima</news:title>
    </news:news>
  </url>
  <url>
    <loc>https://24-ai.news/hr/news/2026-05-05/cncf-github-actions-ci-security/</loc>
    <news:news>
      <news:publication>
        <news:name>24 AI</news:name>
        <news:language>hr</news:language>
      </news:publication>
      <news:publication_date>2026-05-05T00:00:00Z</news:publication_date>
      <news:title>CNCF: pinning na immutable digest, least-privilege tokeni i ephemeral runneri — recipe card za sigurniji GitHub Actions pipeline</news:title>
    </news:news>
  </url>
  <url>
    <loc>https://24-ai.news/hr/news/2026-05-05/nist-caisi-deepseek-v4-pro-evaluation/</loc>
    <news:news>
      <news:publication>
        <news:name>24 AI</news:name>
        <news:language>hr</news:language>
      </news:publication>
      <news:publication_date>2026-05-05T00:00:00Z</news:publication_date>
      <news:title>NIST CAISI: DeepSeek V4 Pro je najsposobniji kineski AI model do sada, ali zaostaje 8 mjeseci za američkim frontierom</news:title>
    </news:news>
  </url>
  <url>
    <loc>https://24-ai.news/en/news/2026-05-05/anthropic-enterprise-services-blackstone-goldman/</loc>
    <news:news>
      <news:publication>
        <news:name>24 AI</news:name>
        <news:language>en</news:language>
      </news:publication>
      <news:publication_date>2026-05-05T00:00:00Z</news:publication_date>
      <news:title>Anthropic launches enterprise AI services company with Blackstone, Hellman &amp; Friedman, and Goldman Sachs for mid-market</news:title>
    </news:news>
  </url>
  <url>
    <loc>https://24-ai.news/en/news/2026-05-05/arxiv-agentfloor-small-models-tools/</loc>
    <news:news>
      <news:publication>
        <news:name>24 AI</news:name>
        <news:language>en</news:language>
      </news:publication>
      <news:publication_date>2026-05-05T00:00:00Z</news:publication_date>
      <news:title>ArXiv AgentFloor: small open-weight models (0.27B–32B) are sufficient for short-horizon agent tasks; GPT-5 retains advantage only in long-horizon planning</news:title>
    </news:news>
  </url>
  <url>
    <loc>https://24-ai.news/en/news/2026-05-05/arxiv-gui-sd-on-policy-self-distillation/</loc>
    <news:news>
      <news:publication>
        <news:name>24 AI</news:name>
        <news:language>en</news:language>
      </news:publication>
      <news:publication_date>2026-05-05T00:00:00Z</news:publication_date>
      <news:title>ArXiv GUI-SD: first on-policy self-distillation framework for GUI grounding outperforms GRPO across six benchmarks in accuracy and training efficiency</news:title>
    </news:news>
  </url>
  <url>
    <loc>https://24-ai.news/en/news/2026-05-05/arxiv-saga-gpu-scheduling-agents/</loc>
    <news:news>
      <news:publication>
        <news:name>24 AI</news:name>
        <news:language>en</news:language>
      </news:publication>
      <news:publication_date>2026-05-05T00:00:00Z</news:publication_date>
      <news:title>ArXiv SAGA: workflow-atomic GPU scheduling for AI agents achieves 1.64× faster task completion on a 64-GPU cluster, accepted at HPDC 2026</news:title>
    </news:news>
  </url>
  <url>
    <loc>https://24-ai.news/en/news/2026-05-05/arxiv-token-arena-energy-benchmark/</loc>
    <news:news>
      <news:publication>
        <news:name>24 AI</news:name>
        <news:language>en</news:language>
      </news:publication>
      <news:publication_date>2026-05-05T00:00:00Z</news:publication_date>
      <news:title>ArXiv Token Arena: continuous benchmark unifying energy and cognition reveals 6.2× difference in joules per correct answer across endpoints</news:title>
    </news:news>
  </url>
  <url>
    <loc>https://24-ai.news/en/news/2026-05-05/arxiv-vlm-visual-jailbreak-icml-2026/</loc>
    <news:news>
      <news:publication>
        <news:name>24 AI</news:name>
        <news:language>en</news:language>
      </news:publication>
      <news:publication_date>2026-05-05T00:00:00Z</news:publication_date>
      <news:title>ArXiv: Visual inputs bypass safety filters in vision-language models 40.9% of the time, ICML 2026 authors find</news:title>
    </news:news>
  </url>
  <url>
    <loc>https://24-ai.news/en/news/2026-05-05/aws-agentcore-optimization-preview/</loc>
    <news:news>
      <news:publication>
        <news:name>24 AI</news:name>
        <news:language>en</news:language>
      </news:publication>
      <news:publication_date>2026-05-05T00:00:00Z</news:publication_date>
      <news:title>AWS Bedrock AgentCore Optimization in preview: automated loop from production traces to A/B tests via OpenTelemetry</news:title>
    </news:news>
  </url>
  <url>
    <loc>https://24-ai.news/en/news/2026-05-05/cncf-github-actions-ci-security/</loc>
    <news:news>
      <news:publication>
        <news:name>24 AI</news:name>
        <news:language>en</news:language>
      </news:publication>
      <news:publication_date>2026-05-05T00:00:00Z</news:publication_date>
      <news:title>CNCF: immutable digest pinning, least-privilege tokens, and ephemeral runners — a recipe card for a more secure GitHub Actions pipeline</news:title>
    </news:news>
  </url>
  <url>
    <loc>https://24-ai.news/en/news/2026-05-05/nist-caisi-deepseek-v4-pro-evaluation/</loc>
    <news:news>
      <news:publication>
        <news:name>24 AI</news:name>
        <news:language>en</news:language>
      </news:publication>
      <news:publication_date>2026-05-05T00:00:00Z</news:publication_date>
      <news:title>NIST CAISI: DeepSeek V4 Pro is the most capable Chinese AI model to date, but trails US frontier by 8 months</news:title>
    </news:news>
  </url>
  <url>
    <loc>https://24-ai.news/de/news/2026-05-05/anthropic-enterprise-services-blackstone-goldman/</loc>
    <news:news>
      <news:publication>
        <news:name>24 AI</news:name>
        <news:language>de</news:language>
      </news:publication>
      <news:publication_date>2026-05-05T00:00:00Z</news:publication_date>
      <news:title>Anthropic gründet Enterprise-KI-Dienstleistungsunternehmen mit Blackstone, Hellman &amp; Friedman und Goldman Sachs für den Mid-Market</news:title>
    </news:news>
  </url>
  <url>
    <loc>https://24-ai.news/de/news/2026-05-05/arxiv-agentfloor-small-models-tools/</loc>
    <news:news>
      <news:publication>
        <news:name>24 AI</news:name>
        <news:language>de</news:language>
      </news:publication>
      <news:publication_date>2026-05-05T00:00:00Z</news:publication_date>
      <news:title>ArXiv AgentFloor: Kleine Open-Weight-Modelle (0,27B–32B) reichen für kurzfristige Agenten-Aufgaben aus; GPT-5 behält Vorteil nur bei langfristiger Planung</news:title>
    </news:news>
  </url>
  <url>
    <loc>https://24-ai.news/de/news/2026-05-05/arxiv-gui-sd-on-policy-self-distillation/</loc>
    <news:news>
      <news:publication>
        <news:name>24 AI</news:name>
        <news:language>de</news:language>
      </news:publication>
      <news:publication_date>2026-05-05T00:00:00Z</news:publication_date>
      <news:title>ArXiv GUI-SD: Erstes On-Policy-Self-Distillation-Framework für GUI-Grounding übertrifft GRPO auf sechs Benchmarks in Genauigkeit und Trainingseffizienz</news:title>
    </news:news>
  </url>
  <url>
    <loc>https://24-ai.news/de/news/2026-05-05/arxiv-saga-gpu-scheduling-agents/</loc>
    <news:news>
      <news:publication>
        <news:name>24 AI</news:name>
        <news:language>de</news:language>
      </news:publication>
      <news:publication_date>2026-05-05T00:00:00Z</news:publication_date>
      <news:title>ArXiv SAGA: Workflow-atomares GPU-Scheduling für KI-Agenten erreicht 1,64× schnellere Task-Completion auf 64-GPU-Cluster, angenommen auf HPDC 2026</news:title>
    </news:news>
  </url>
  <url>
    <loc>https://24-ai.news/de/news/2026-05-05/arxiv-token-arena-energy-benchmark/</loc>
    <news:news>
      <news:publication>
        <news:name>24 AI</news:name>
        <news:language>de</news:language>
      </news:publication>
      <news:publication_date>2026-05-05T00:00:00Z</news:publication_date>
      <news:title>ArXiv Token Arena: kontinuierlicher Benchmark für Energie und Kognition zeigt 6,2-fachen Unterschied in Joule pro korrekter Antwort zwischen Endpunkten</news:title>
    </news:news>
  </url>
  <url>
    <loc>https://24-ai.news/de/news/2026-05-05/arxiv-vlm-visual-jailbreak-icml-2026/</loc>
    <news:news>
      <news:publication>
        <news:name>24 AI</news:name>
        <news:language>de</news:language>
      </news:publication>
      <news:publication_date>2026-05-05T00:00:00Z</news:publication_date>
      <news:title>ArXiv: Visuelle Eingaben umgehen Sicherheitsfilter von Vision-Language-Modellen in 40,9 % der Fälle, zeigt ICML-2026-Studie</news:title>
    </news:news>
  </url>
  <url>
    <loc>https://24-ai.news/de/news/2026-05-05/aws-agentcore-optimization-preview/</loc>
    <news:news>
      <news:publication>
        <news:name>24 AI</news:name>
        <news:language>de</news:language>
      </news:publication>
      <news:publication_date>2026-05-05T00:00:00Z</news:publication_date>
      <news:title>AWS Bedrock AgentCore Optimization in der Vorschau: automatisierte Schleife von Produktions-Traces bis A/B-Tests via OpenTelemetry</news:title>
    </news:news>
  </url>
  <url>
    <loc>https://24-ai.news/de/news/2026-05-05/cncf-github-actions-ci-security/</loc>
    <news:news>
      <news:publication>
        <news:name>24 AI</news:name>
        <news:language>de</news:language>
      </news:publication>
      <news:publication_date>2026-05-05T00:00:00Z</news:publication_date>
      <news:title>CNCF: Unveränderliches Digest-Pinning, Least-Privilege-Token und ephemere Runner — Rezeptkarte für sicherere GitHub-Actions-Pipelines</news:title>
    </news:news>
  </url>
  <url>
    <loc>https://24-ai.news/de/news/2026-05-05/nist-caisi-deepseek-v4-pro-evaluation/</loc>
    <news:news>
      <news:publication>
        <news:name>24 AI</news:name>
        <news:language>de</news:language>
      </news:publication>
      <news:publication_date>2026-05-05T00:00:00Z</news:publication_date>
      <news:title>NIST CAISI: DeepSeek V4 Pro ist bisher fähigstes chinesisches KI-Modell, liegt aber 8 Monate hinter US-Frontier</news:title>
    </news:news>
  </url>
  <url>
    <loc>https://24-ai.news/zh/news/2026-05-05/anthropic-enterprise-services-blackstone-goldman/</loc>
    <news:news>
      <news:publication>
        <news:name>24 AI</news:name>
        <news:language>zh-cn</news:language>
      </news:publication>
      <news:publication_date>2026-05-05T00:00:00Z</news:publication_date>
      <news:title>Anthropic联合Blackstone、Hellman &amp; Friedman和Goldman Sachs成立面向中端市场的企业AI服务公司</news:title>
    </news:news>
  </url>
  <url>
    <loc>https://24-ai.news/zh/news/2026-05-05/arxiv-agentfloor-small-models-tools/</loc>
    <news:news>
      <news:publication>
        <news:name>24 AI</news:name>
        <news:language>zh-cn</news:language>
      </news:publication>
      <news:publication_date>2026-05-05T00:00:00Z</news:publication_date>
      <news:title>ArXiv AgentFloor：小型开放权重模型（0.27B-32B）能胜任短期智能体任务，GPT-5仅在长期规划上保持优势</news:title>
    </news:news>
  </url>
  <url>
    <loc>https://24-ai.news/zh/news/2026-05-05/arxiv-gui-sd-on-policy-self-distillation/</loc>
    <news:news>
      <news:publication>
        <news:name>24 AI</news:name>
        <news:language>zh-cn</news:language>
      </news:publication>
      <news:publication_date>2026-05-05T00:00:00Z</news:publication_date>
      <news:title>ArXiv GUI-SD：首个面向GUI定位的在线自蒸馏框架，在六个基准上超越GRPO强化学习</news:title>
    </news:news>
  </url>
  <url>
    <loc>https://24-ai.news/zh/news/2026-05-05/arxiv-saga-gpu-scheduling-agents/</loc>
    <news:news>
      <news:publication>
        <news:name>24 AI</news:name>
        <news:language>zh-cn</news:language>
      </news:publication>
      <news:publication_date>2026-05-05T00:00:00Z</news:publication_date>
      <news:title>ArXiv SAGA：AI智能体的工作流原子化GPU调度在64-GPU集群上实现1.64倍任务完成提速，被HPDC 2026接收</news:title>
    </news:news>
  </url>
  <url>
    <loc>https://24-ai.news/zh/news/2026-05-05/arxiv-token-arena-energy-benchmark/</loc>
    <news:news>
      <news:publication>
        <news:name>24 AI</news:name>
        <news:language>zh-cn</news:language>
      </news:publication>
      <news:publication_date>2026-05-05T00:00:00Z</news:publication_date>
      <news:title>ArXiv Token Arena：统一能耗与认知的持续基准，揭示端点间每正确答案能耗6.2倍差距</news:title>
    </news:news>
  </url>
  <url>
    <loc>https://24-ai.news/zh/news/2026-05-05/arxiv-vlm-visual-jailbreak-icml-2026/</loc>
    <news:news>
      <news:publication>
        <news:name>24 AI</news:name>
        <news:language>zh-cn</news:language>
      </news:publication>
      <news:publication_date>2026-05-05T00:00:00Z</news:publication_date>
      <news:title>ArXiv：视觉图像以40.9%的成功率绕过视觉语言模型安全过滤器，ICML 2026论文揭示</news:title>
    </news:news>
  </url>
  <url>
    <loc>https://24-ai.news/zh/news/2026-05-05/aws-agentcore-optimization-preview/</loc>
    <news:news>
      <news:publication>
        <news:name>24 AI</news:name>
        <news:language>zh-cn</news:language>
      </news:publication>
      <news:publication_date>2026-05-05T00:00:00Z</news:publication_date>
      <news:title>AWS Bedrock AgentCore Optimization进入预览：从生产追踪到A/B测试的自动化循环，基于OpenTelemetry追踪</news:title>
    </news:news>
  </url>
  <url>
    <loc>https://24-ai.news/zh/news/2026-05-05/cncf-github-actions-ci-security/</loc>
    <news:news>
      <news:publication>
        <news:name>24 AI</news:name>
        <news:language>zh-cn</news:language>
      </news:publication>
      <news:publication_date>2026-05-05T00:00:00Z</news:publication_date>
      <news:title>CNCF：固定到不可变摘要、最小权限令牌和临时运行器——更安全的GitHub Actions管道实践指南</news:title>
    </news:news>
  </url>
  <url>
    <loc>https://24-ai.news/zh/news/2026-05-05/nist-caisi-deepseek-v4-pro-evaluation/</loc>
    <news:news>
      <news:publication>
        <news:name>24 AI</news:name>
        <news:language>zh-cn</news:language>
      </news:publication>
      <news:publication_date>2026-05-05T00:00:00Z</news:publication_date>
      <news:title>NIST CAISI：DeepSeek V4 Pro是迄今最强中国AI模型，但落后美国前沿约8个月</news:title>
    </news:news>
  </url>
  <url>
    <loc>https://24-ai.news/ja/news/2026-05-05/anthropic-enterprise-services-blackstone-goldman/</loc>
    <news:news>
      <news:publication>
        <news:name>24 AI</news:name>
        <news:language>ja</news:language>
      </news:publication>
      <news:publication_date>2026-05-05T00:00:00Z</news:publication_date>
      <news:title>AnthropicがBlackstone、Hellman &amp; Friedman、Goldman Sachsと中堅市場向けエンタープライズAIサービス会社を設立</news:title>
    </news:news>
  </url>
  <url>
    <loc>https://24-ai.news/ja/news/2026-05-05/arxiv-agentfloor-small-models-tools/</loc>
    <news:news>
      <news:publication>
        <news:name>24 AI</news:name>
        <news:language>ja</news:language>
      </news:publication>
      <news:publication_date>2026-05-05T00:00:00Z</news:publication_date>
      <news:title>ArXiv AgentFloor：小型オープンウェイトモデル（0.27B-32B）が短期エージェントタスクに十分、GPT-5は長期計画のみで優位を維持</news:title>
    </news:news>
  </url>
  <url>
    <loc>https://24-ai.news/ja/news/2026-05-05/arxiv-gui-sd-on-policy-self-distillation/</loc>
    <news:news>
      <news:publication>
        <news:name>24 AI</news:name>
        <news:language>ja</news:language>
      </news:publication>
      <news:publication_date>2026-05-05T00:00:00Z</news:publication_date>
      <news:title>ArXiv GUI-SD：GUIグラウンディング向け初のオンポリシー自己蒸留フレームワーク、6つのベンチマークでGRPO強化学習を凌駕</news:title>
    </news:news>
  </url>
  <url>
    <loc>https://24-ai.news/ja/news/2026-05-05/arxiv-saga-gpu-scheduling-agents/</loc>
    <news:news>
      <news:publication>
        <news:name>24 AI</news:name>
        <news:language>ja</news:language>
      </news:publication>
      <news:publication_date>2026-05-05T00:00:00Z</news:publication_date>
      <news:title>ArXiv SAGA：AIエージェント向けワークフロー原子化GPUスケジューリング、64-GPUクラスターでタスク完了を1.64倍高速化、HPDC 2026採択</news:title>
    </news:news>
  </url>
  <url>
    <loc>https://24-ai.news/ja/news/2026-05-05/arxiv-token-arena-energy-benchmark/</loc>
    <news:news>
      <news:publication>
        <news:name>24 AI</news:name>
        <news:language>ja</news:language>
      </news:publication>
      <news:publication_date>2026-05-05T00:00:00Z</news:publication_date>
      <news:title>ArXiv Token Arena：エネルギーと認知を統合する継続的ベンチマーク、エンドポイント間で正解あたりエネルギーの6.2倍の差を発見</news:title>
    </news:news>
  </url>
  <url>
    <loc>https://24-ai.news/ja/news/2026-05-05/arxiv-vlm-visual-jailbreak-icml-2026/</loc>
    <news:news>
      <news:publication>
        <news:name>24 AI</news:name>
        <news:language>ja</news:language>
      </news:publication>
      <news:publication_date>2026-05-05T00:00:00Z</news:publication_date>
      <news:title>ArXiv：視覚画像がVLMの安全フィルターを40.9%の確率で回避、ICML 2026論文が明らかに</news:title>
    </news:news>
  </url>
  <url>
    <loc>https://24-ai.news/ja/news/2026-05-05/aws-agentcore-optimization-preview/</loc>
    <news:news>
      <news:publication>
        <news:name>24 AI</news:name>
        <news:language>ja</news:language>
      </news:publication>
      <news:publication_date>2026-05-05T00:00:00Z</news:publication_date>
      <news:title>AWS Bedrock AgentCore Optimizationがプレビュー公開：OpenTelemetryトレースで本番環境からA/Bテストまでの自動化ループを実現</news:title>
    </news:news>
  </url>
  <url>
    <loc>https://24-ai.news/ja/news/2026-05-05/cncf-github-actions-ci-security/</loc>
    <news:news>
      <news:publication>
        <news:name>24 AI</news:name>
        <news:language>ja</news:language>
      </news:publication>
      <news:publication_date>2026-05-05T00:00:00Z</news:publication_date>
      <news:title>CNCF：不変ダイジェストへのピン留め、最小権限トークン、エフェメラルランナー——より安全なGitHub Actionsパイプラインへのレシピカード</news:title>
    </news:news>
  </url>
  <url>
    <loc>https://24-ai.news/ja/news/2026-05-05/nist-caisi-deepseek-v4-pro-evaluation/</loc>
    <news:news>
      <news:publication>
        <news:name>24 AI</news:name>
        <news:language>ja</news:language>
      </news:publication>
      <news:publication_date>2026-05-05T00:00:00Z</news:publication_date>
      <news:title>NIST CAISI：DeepSeek V4 Proはこれまで評価した中で最も優れた中国AIモデルだが、米国フロンティアに8ヶ月遅れ</news:title>
    </news:news>
  </url>
  <url>
    <loc>https://24-ai.news/ko/news/2026-05-05/anthropic-enterprise-services-blackstone-goldman/</loc>
    <news:news>
      <news:publication>
        <news:name>24 AI</news:name>
        <news:language>ko</news:language>
      </news:publication>
      <news:publication_date>2026-05-05T00:00:00Z</news:publication_date>
      <news:title>Anthropic, Blackstone·Hellman &amp; Friedman·Goldman Sachs와 중소기업 대상 엔터프라이즈 AI 서비스 회사 설립</news:title>
    </news:news>
  </url>
  <url>
    <loc>https://24-ai.news/ko/news/2026-05-05/arxiv-agentfloor-small-models-tools/</loc>
    <news:news>
      <news:publication>
        <news:name>24 AI</news:name>
        <news:language>ko</news:language>
      </news:publication>
      <news:publication_date>2026-05-05T00:00:00Z</news:publication_date>
      <news:title>ArXiv AgentFloor: 소형 오픈웨이트 모델(0.27B-32B)이 단기 에이전트 작업에 충분, GPT-5는 장기 계획에서만 우위</news:title>
    </news:news>
  </url>
  <url>
    <loc>https://24-ai.news/ko/news/2026-05-05/arxiv-gui-sd-on-policy-self-distillation/</loc>
    <news:news>
      <news:publication>
        <news:name>24 AI</news:name>
        <news:language>ko</news:language>
      </news:publication>
      <news:publication_date>2026-05-05T00:00:00Z</news:publication_date>
      <news:title>ArXiv GUI-SD: GUI 그라운딩을 위한 최초의 온폴리시 자기 증류 프레임워크, 6개 벤치마크에서 GRPO 강화학습 능가</news:title>
    </news:news>
  </url>
  <url>
    <loc>https://24-ai.news/ko/news/2026-05-05/arxiv-saga-gpu-scheduling-agents/</loc>
    <news:news>
      <news:publication>
        <news:name>24 AI</news:name>
        <news:language>ko</news:language>
      </news:publication>
      <news:publication_date>2026-05-05T00:00:00Z</news:publication_date>
      <news:title>ArXiv SAGA: AI 에이전트를 위한 워크플로우 원자화 GPU 스케줄링, 64-GPU 클러스터에서 작업 완료 1.64배 단축, HPDC 2026 채택</news:title>
    </news:news>
  </url>
  <url>
    <loc>https://24-ai.news/ko/news/2026-05-05/arxiv-token-arena-energy-benchmark/</loc>
    <news:news>
      <news:publication>
        <news:name>24 AI</news:name>
        <news:language>ko</news:language>
      </news:publication>
      <news:publication_date>2026-05-05T00:00:00Z</news:publication_date>
      <news:title>ArXiv Token Arena: 에너지와 인지를 통합한 지속적 벤치마크, 엔드포인트 간 정답당 에너지 6.2배 차이 발견</news:title>
    </news:news>
  </url>
  <url>
    <loc>https://24-ai.news/ko/news/2026-05-05/arxiv-vlm-visual-jailbreak-icml-2026/</loc>
    <news:news>
      <news:publication>
        <news:name>24 AI</news:name>
        <news:language>ko</news:language>
      </news:publication>
      <news:publication_date>2026-05-05T00:00:00Z</news:publication_date>
      <news:title>ArXiv: 시각 이미지가 VLM 안전 필터를 40.9% 확률로 우회, ICML 2026 논문 공개</news:title>
    </news:news>
  </url>
  <url>
    <loc>https://24-ai.news/ko/news/2026-05-05/aws-agentcore-optimization-preview/</loc>
    <news:news>
      <news:publication>
        <news:name>24 AI</news:name>
        <news:language>ko</news:language>
      </news:publication>
      <news:publication_date>2026-05-05T00:00:00Z</news:publication_date>
      <news:title>AWS Bedrock AgentCore Optimization 프리뷰 출시: OpenTelemetry 트레이스로 생산에서 A/B 테스트까지 자동화 루프 구현</news:title>
    </news:news>
  </url>
  <url>
    <loc>https://24-ai.news/ko/news/2026-05-05/cncf-github-actions-ci-security/</loc>
    <news:news>
      <news:publication>
        <news:name>24 AI</news:name>
        <news:language>ko</news:language>
      </news:publication>
      <news:publication_date>2026-05-05T00:00:00Z</news:publication_date>
      <news:title>CNCF: 불변 다이제스트 고정, 최소 권한 토큰, 임시 러너——더 안전한 GitHub Actions 파이프라인을 위한 레시피 카드</news:title>
    </news:news>
  </url>
  <url>
    <loc>https://24-ai.news/ko/news/2026-05-05/nist-caisi-deepseek-v4-pro-evaluation/</loc>
    <news:news>
      <news:publication>
        <news:name>24 AI</news:name>
        <news:language>ko</news:language>
      </news:publication>
      <news:publication_date>2026-05-05T00:00:00Z</news:publication_date>
      <news:title>NIST CAISI: DeepSeek V4 Pro, 지금까지 평가된 최강 중국 AI 모델이지만 미국 프런티어에 8개월 뒤처져</news:title>
    </news:news>
  </url>
  <url>
    <loc>https://24-ai.news/hr/news/2026-05-04/arxiv-adamezo-zeroth-order-llm-finetuning/</loc>
    <news:news>
      <news:publication>
        <news:name>24 AI</news:name>
        <news:language>hr</news:language>
      </news:publication>
      <news:publication_date>2026-05-04T00:00:00Z</news:publication_date>
      <news:title>AdaMeZO: fino ugađanje LLM-ova Adam-stilom bez pohrane momenata u GPU memoriji</news:title>
    </news:news>
  </url>
  <url>
    <loc>https://24-ai.news/hr/news/2026-05-04/arxiv-aem-adaptive-entropy-modulation-rl-agenti/</loc>
    <news:news>
      <news:publication>
        <news:name>24 AI</news:name>
        <news:language>hr</news:language>
      </news:publication>
      <news:publication_date>2026-05-04T00:00:00Z</news:publication_date>
      <news:title>ArXiv AEM: adaptivna modulacija entropije za multi-turn RL agente postiže +1,4 % na SWE-bench Verified</news:title>
    </news:news>
  </url>
  <url>
    <loc>https://24-ai.news/hr/news/2026-05-04/arxiv-armor-2025-vojni-llm-sigurnost/</loc>
    <news:news>
      <news:publication>
        <news:name>24 AI</news:name>
        <news:language>hr</news:language>
      </news:publication>
      <news:publication_date>2026-05-04T00:00:00Z</news:publication_date>
      <news:title>ArXiv ARMOR 2025: prvi vojni benchmark za LLM sigurnost s 519 promptova kroz 21 komercijalni model</news:title>
    </news:news>
  </url>
  <url>
    <loc>https://24-ai.news/hr/news/2026-05-04/arxiv-bayes-consistent-agentic-orchestration-icml/</loc>
    <news:news>
      <news:publication>
        <news:name>24 AI</news:name>
        <news:language>hr</news:language>
      </news:publication>
      <news:publication_date>2026-05-04T00:00:00Z</news:publication_date>
      <news:title>Position paper s 30 autora na ICML 2026: orkestracija agentnih AI sustava mora biti Bayes-konzistentna</news:title>
    </news:news>
  </url>
  <url>
    <loc>https://24-ai.news/hr/news/2026-05-04/arxiv-bwla-w1ax-kvantizacija-llm/</loc>
    <news:news>
      <news:publication>
        <news:name>24 AI</news:name>
        <news:language>hr</news:language>
      </news:publication>
      <news:publication_date>2026-05-04T00:00:00Z</news:publication_date>
      <news:title>BWLA: 1-bitna kvantizacija LLM-ova s 3,26× ubrzanjem i 70% boljim rezultatima (ACL 2026)</news:title>
    </news:news>
  </url>
  <url>
    <loc>https://24-ai.news/hr/news/2026-05-04/arxiv-stable-gflownet-llm-red-teaming-icml/</loc>
    <news:news>
      <news:publication>
        <news:name>24 AI</news:name>
        <news:language>hr</news:language>
      </news:publication>
      <news:publication_date>2026-05-04T00:00:00Z</news:publication_date>
      <news:title>ICML 2026 Spotlight: Stable-GFlowNet uvodi stabilnije i raznovrsnije automatizirano red-teamanje LLM-ova</news:title>
    </news:news>
  </url>
  <url>
    <loc>https://24-ai.news/hr/news/2026-05-04/arxiv-tool-calling-framework-llm-agenti/</loc>
    <news:news>
      <news:publication>
        <news:name>24 AI</news:name>
        <news:language>hr</news:language>
      </news:publication>
      <news:publication_date>2026-05-04T00:00:00Z</news:publication_date>
      <news:title>ArXiv okvir &apos;To Call or Not to Call&apos; otkriva da LLM-ovi pogrešno procjenjuju kad im trebaju vanjski alati</news:title>
    </news:news>
  </url>
  <url>
    <loc>https://24-ai.news/hr/news/2026-05-04/arxiv-tool-use-tax-llm-agenti/</loc>
    <news:news>
      <news:publication>
        <news:name>24 AI</news:name>
        <news:language>hr</news:language>
      </news:publication>
      <news:publication_date>2026-05-04T00:00:00Z</news:publication_date>
      <news:title>ArXiv: skriveni trošak alata u LLM agentima — &quot;tool-use tax&quot; smanjuje točnost čak i kad alati pomažu</news:title>
    </news:news>
  </url>
  <url>
    <loc>https://24-ai.news/hr/news/2026-05-04/ibm-ceo-study-c-suite-ai-restrukturiranje/</loc>
    <news:news>
      <news:publication>
        <news:name>24 AI</news:name>
        <news:language>hr</news:language>
      </news:publication>
      <news:publication_date>2026-05-04T00:00:00Z</news:publication_date>
      <news:title>IBM studija: 76 % organizacija ima Chief AI Officera, CEO-i očekuju 48 % autonomnih AI odluka do 2030.</news:title>
    </news:news>
  </url>
  <url>
    <loc>https://24-ai.news/en/news/2026-05-04/arxiv-adamezo-zeroth-order-llm-finetuning/</loc>
    <news:news>
      <news:publication>
        <news:name>24 AI</news:name>
        <news:language>en</news:language>
      </news:publication>
      <news:publication_date>2026-05-04T00:00:00Z</news:publication_date>
      <news:title>AdaMeZO: Adam-style LLM fine-tuning without storing gradient moments in GPU memory</news:title>
    </news:news>
  </url>
  <url>
    <loc>https://24-ai.news/en/news/2026-05-04/arxiv-aem-adaptive-entropy-modulation-rl-agenti/</loc>
    <news:news>
      <news:publication>
        <news:name>24 AI</news:name>
        <news:language>en</news:language>
      </news:publication>
      <news:publication_date>2026-05-04T00:00:00Z</news:publication_date>
      <news:title>ArXiv AEM: Adaptive Entropy Modulation for multi-turn RL agents achieves +1.4% on SWE-bench Verified</news:title>
    </news:news>
  </url>
  <url>
    <loc>https://24-ai.news/en/news/2026-05-04/arxiv-armor-2025-vojni-llm-sigurnost/</loc>
    <news:news>
      <news:publication>
        <news:name>24 AI</news:name>
        <news:language>en</news:language>
      </news:publication>
      <news:publication_date>2026-05-04T00:00:00Z</news:publication_date>
      <news:title>ArXiv ARMOR 2025: first military LLM safety benchmark with 519 prompts across 21 commercial models</news:title>
    </news:news>
  </url>
  <url>
    <loc>https://24-ai.news/en/news/2026-05-04/arxiv-bayes-consistent-agentic-orchestration-icml/</loc>
    <news:news>
      <news:publication>
        <news:name>24 AI</news:name>
        <news:language>en</news:language>
      </news:publication>
      <news:publication_date>2026-05-04T00:00:00Z</news:publication_date>
      <news:title>Position paper by 30 authors at ICML 2026: agentic AI orchestration must be Bayes-consistent</news:title>
    </news:news>
  </url>
  <url>
    <loc>https://24-ai.news/en/news/2026-05-04/arxiv-bwla-w1ax-kvantizacija-llm/</loc>
    <news:news>
      <news:publication>
        <news:name>24 AI</news:name>
        <news:language>en</news:language>
      </news:publication>
      <news:publication_date>2026-05-04T00:00:00Z</news:publication_date>
      <news:title>BWLA: 1-bit LLM quantization with 3.26× speedup and 70% better results (ACL 2026)</news:title>
    </news:news>
  </url>
  <url>
    <loc>https://24-ai.news/en/news/2026-05-04/arxiv-stable-gflownet-llm-red-teaming-icml/</loc>
    <news:news>
      <news:publication>
        <news:name>24 AI</news:name>
        <news:language>en</news:language>
      </news:publication>
      <news:publication_date>2026-05-04T00:00:00Z</news:publication_date>
      <news:title>ICML 2026 Spotlight: Stable-GFlowNet introduces more stable and diverse automated LLM red-teaming</news:title>
    </news:news>
  </url>
  <url>
    <loc>https://24-ai.news/en/news/2026-05-04/arxiv-tool-calling-framework-llm-agenti/</loc>
    <news:news>
      <news:publication>
        <news:name>24 AI</news:name>
        <news:language>en</news:language>
      </news:publication>
      <news:publication_date>2026-05-04T00:00:00Z</news:publication_date>
      <news:title>ArXiv &apos;To Call or Not to Call&apos; framework reveals LLMs misjudge when they need external tools</news:title>
    </news:news>
  </url>
  <url>
    <loc>https://24-ai.news/en/news/2026-05-04/arxiv-tool-use-tax-llm-agenti/</loc>
    <news:news>
      <news:publication>
        <news:name>24 AI</news:name>
        <news:language>en</news:language>
      </news:publication>
      <news:publication_date>2026-05-04T00:00:00Z</news:publication_date>
      <news:title>ArXiv: the hidden cost of tools in LLM agents — &apos;tool-use tax&apos; reduces accuracy even when tools help</news:title>
    </news:news>
  </url>
  <url>
    <loc>https://24-ai.news/en/news/2026-05-04/ibm-ceo-study-c-suite-ai-restrukturiranje/</loc>
    <news:news>
      <news:publication>
        <news:name>24 AI</news:name>
        <news:language>en</news:language>
      </news:publication>
      <news:publication_date>2026-05-04T00:00:00Z</news:publication_date>
      <news:title>IBM study: 76% of organizations have a Chief AI Officer, CEOs expect 48% autonomous AI decisions by 2030</news:title>
    </news:news>
  </url>
  <url>
    <loc>https://24-ai.news/de/news/2026-05-04/arxiv-adamezo-zeroth-order-llm-finetuning/</loc>
    <news:news>
      <news:publication>
        <news:name>24 AI</news:name>
        <news:language>de</news:language>
      </news:publication>
      <news:publication_date>2026-05-04T00:00:00Z</news:publication_date>
      <news:title>AdaMeZO: Adam-Stil LLM-Fine-Tuning ohne Speicherung von Gradientenmomenten im GPU-Speicher</news:title>
    </news:news>
  </url>
  <url>
    <loc>https://24-ai.news/de/news/2026-05-04/arxiv-aem-adaptive-entropy-modulation-rl-agenti/</loc>
    <news:news>
      <news:publication>
        <news:name>24 AI</news:name>
        <news:language>de</news:language>
      </news:publication>
      <news:publication_date>2026-05-04T00:00:00Z</news:publication_date>
      <news:title>ArXiv AEM: Adaptive Entropiemodulation für Multi-Turn-RL-Agenten erreicht +1,4 % auf SWE-bench Verified</news:title>
    </news:news>
  </url>
  <url>
    <loc>https://24-ai.news/de/news/2026-05-04/arxiv-armor-2025-vojni-llm-sigurnost/</loc>
    <news:news>
      <news:publication>
        <news:name>24 AI</news:name>
        <news:language>de</news:language>
      </news:publication>
      <news:publication_date>2026-05-04T00:00:00Z</news:publication_date>
      <news:title>ArXiv ARMOR 2025: erster militärischer LLM-Sicherheitsbenchmark mit 519 Prompts über 21 kommerzielle Modelle</news:title>
    </news:news>
  </url>
  <url>
    <loc>https://24-ai.news/de/news/2026-05-04/arxiv-bayes-consistent-agentic-orchestration-icml/</loc>
    <news:news>
      <news:publication>
        <news:name>24 AI</news:name>
        <news:language>de</news:language>
      </news:publication>
      <news:publication_date>2026-05-04T00:00:00Z</news:publication_date>
      <news:title>Position Paper von 30 Autoren auf ICML 2026: Orchestrierung agentischer KI-Systeme muss Bayes-konsistent sein</news:title>
    </news:news>
  </url>
  <url>
    <loc>https://24-ai.news/de/news/2026-05-04/arxiv-bwla-w1ax-kvantizacija-llm/</loc>
    <news:news>
      <news:publication>
        <news:name>24 AI</news:name>
        <news:language>de</news:language>
      </news:publication>
      <news:publication_date>2026-05-04T00:00:00Z</news:publication_date>
      <news:title>BWLA: 1-Bit-Quantisierung von Sprachmodellen mit 3,26-facher Beschleunigung und 70 % besseren Ergebnissen (ACL 2026)</news:title>
    </news:news>
  </url>
  <url>
    <loc>https://24-ai.news/de/news/2026-05-04/arxiv-stable-gflownet-llm-red-teaming-icml/</loc>
    <news:news>
      <news:publication>
        <news:name>24 AI</news:name>
        <news:language>de</news:language>
      </news:publication>
      <news:publication_date>2026-05-04T00:00:00Z</news:publication_date>
      <news:title>ICML 2026 Spotlight: Stable-GFlowNet führt stabileres und vielfältigeres automatisiertes Red-Teaming von Sprachmodellen ein</news:title>
    </news:news>
  </url>
  <url>
    <loc>https://24-ai.news/de/news/2026-05-04/arxiv-tool-calling-framework-llm-agenti/</loc>
    <news:news>
      <news:publication>
        <news:name>24 AI</news:name>
        <news:language>de</news:language>
      </news:publication>
      <news:publication_date>2026-05-04T00:00:00Z</news:publication_date>
      <news:title>ArXiv-Rahmen &apos;To Call or Not to Call&apos; zeigt: Sprachmodelle beurteilen falsch, wann sie externe Werkzeuge brauchen</news:title>
    </news:news>
  </url>
  <url>
    <loc>https://24-ai.news/de/news/2026-05-04/arxiv-tool-use-tax-llm-agenti/</loc>
    <news:news>
      <news:publication>
        <news:name>24 AI</news:name>
        <news:language>de</news:language>
      </news:publication>
      <news:publication_date>2026-05-04T00:00:00Z</news:publication_date>
      <news:title>ArXiv: die versteckten Kosten von Werkzeugen in LLM-Agenten — &apos;Tool-Use Tax&apos; senkt Genauigkeit selbst wenn Werkzeuge helfen</news:title>
    </news:news>
  </url>
  <url>
    <loc>https://24-ai.news/de/news/2026-05-04/ibm-ceo-study-c-suite-ai-restrukturiranje/</loc>
    <news:news>
      <news:publication>
        <news:name>24 AI</news:name>
        <news:language>de</news:language>
      </news:publication>
      <news:publication_date>2026-05-04T00:00:00Z</news:publication_date>
      <news:title>IBM-Studie: 76 % der Unternehmen haben einen Chief AI Officer, CEOs erwarten 48 % autonome KI-Entscheidungen bis 2030</news:title>
    </news:news>
  </url>
  <url>
    <loc>https://24-ai.news/zh/news/2026-05-04/arxiv-adamezo-zeroth-order-llm-finetuning/</loc>
    <news:news>
      <news:publication>
        <news:name>24 AI</news:name>
        <news:language>zh</news:language>
      </news:publication>
      <news:publication_date>2026-05-04T00:00:00Z</news:publication_date>
      <news:title>AdaMeZO：以类Adam方式微调LLM，无需在GPU内存中存储动量</news:title>
    </news:news>
  </url>
  <url>
    <loc>https://24-ai.news/zh/news/2026-05-04/arxiv-aem-adaptive-entropy-modulation-rl-agenti/</loc>
    <news:news>
      <news:publication>
        <news:name>24 AI</news:name>
        <news:language>zh</news:language>
      </news:publication>
      <news:publication_date>2026-05-04T00:00:00Z</news:publication_date>
      <news:title>ArXiv AEM：多轮RL智能体的自适应熵调制在SWE-bench Verified上提升+1.4%</news:title>
    </news:news>
  </url>
  <url>
    <loc>https://24-ai.news/zh/news/2026-05-04/arxiv-armor-2025-vojni-llm-sigurnost/</loc>
    <news:news>
      <news:publication>
        <news:name>24 AI</news:name>
        <news:language>zh</news:language>
      </news:publication>
      <news:publication_date>2026-05-04T00:00:00Z</news:publication_date>
      <news:title>ArXiv ARMOR 2025：519个提示词测试21个商业LLM的军事安全性基准</news:title>
    </news:news>
  </url>
  <url>
    <loc>https://24-ai.news/zh/news/2026-05-04/arxiv-bayes-consistent-agentic-orchestration-icml/</loc>
    <news:news>
      <news:publication>
        <news:name>24 AI</news:name>
        <news:language>zh</news:language>
      </news:publication>
      <news:publication_date>2026-05-04T00:00:00Z</news:publication_date>
      <news:title>ICML 2026 立场论文：30位作者认为智能体AI系统的编排必须符合贝叶斯一致性</news:title>
    </news:news>
  </url>
  <url>
    <loc>https://24-ai.news/zh/news/2026-05-04/arxiv-bwla-w1ax-kvantizacija-llm/</loc>
    <news:news>
      <news:publication>
        <news:name>24 AI</news:name>
        <news:language>zh</news:language>
      </news:publication>
      <news:publication_date>2026-05-04T00:00:00Z</news:publication_date>
      <news:title>BWLA：1位量化LLM实现3.26倍加速和70%更好结果（ACL 2026）</news:title>
    </news:news>
  </url>
  <url>
    <loc>https://24-ai.news/zh/news/2026-05-04/arxiv-stable-gflownet-llm-red-teaming-icml/</loc>
    <news:news>
      <news:publication>
        <news:name>24 AI</news:name>
        <news:language>zh</news:language>
      </news:publication>
      <news:publication_date>2026-05-04T00:00:00Z</news:publication_date>
      <news:title>ICML 2026 Spotlight：Stable-GFlowNet引入更稳定、更多样化的LLM自动化红队测试</news:title>
    </news:news>
  </url>
  <url>
    <loc>https://24-ai.news/zh/news/2026-05-04/arxiv-tool-calling-framework-llm-agenti/</loc>
    <news:news>
      <news:publication>
        <news:name>24 AI</news:name>
        <news:language>zh</news:language>
      </news:publication>
      <news:publication_date>2026-05-04T00:00:00Z</news:publication_date>
      <news:title>ArXiv框架「是否调用」揭示LLM错误判断何时需要外部工具</news:title>
    </news:news>
  </url>
  <url>
    <loc>https://24-ai.news/zh/news/2026-05-04/arxiv-tool-use-tax-llm-agenti/</loc>
    <news:news>
      <news:publication>
        <news:name>24 AI</news:name>
        <news:language>zh</news:language>
      </news:publication>
      <news:publication_date>2026-05-04T00:00:00Z</news:publication_date>
      <news:title>ArXiv：LLM智能体工具的隐性成本——「工具使用税」即使工具有帮助也会降低准确性</news:title>
    </news:news>
  </url>
  <url>
    <loc>https://24-ai.news/zh/news/2026-05-04/ibm-ceo-study-c-suite-ai-restrukturiranje/</loc>
    <news:news>
      <news:publication>
        <news:name>24 AI</news:name>
        <news:language>zh</news:language>
      </news:publication>
      <news:publication_date>2026-05-04T00:00:00Z</news:publication_date>
      <news:title>IBM研究：76%的组织有首席AI官，CEO预计到2030年48%的运营决策将由AI自主作出</news:title>
    </news:news>
  </url>
  <url>
    <loc>https://24-ai.news/ja/news/2026-05-04/arxiv-adamezo-zeroth-order-llm-finetuning/</loc>
    <news:news>
      <news:publication>
        <news:name>24 AI</news:name>
        <news:language>ja</news:language>
      </news:publication>
      <news:publication_date>2026-05-04T00:00:00Z</news:publication_date>
      <news:title>AdaMeZO：GPU メモリにモーメントを保存せずAdam方式でLLMをファインチューニング</news:title>
    </news:news>
  </url>
  <url>
    <loc>https://24-ai.news/ja/news/2026-05-04/arxiv-aem-adaptive-entropy-modulation-rl-agenti/</loc>
    <news:news>
      <news:publication>
        <news:name>24 AI</news:name>
        <news:language>ja</news:language>
      </news:publication>
      <news:publication_date>2026-05-04T00:00:00Z</news:publication_date>
      <news:title>ArXiv AEM：マルチターンRL エージェントの適応的エントロピー変調がSWE-bench Verifiedで+1.4%を達成</news:title>
    </news:news>
  </url>
  <url>
    <loc>https://24-ai.news/ja/news/2026-05-04/arxiv-armor-2025-vojni-llm-sigurnost/</loc>
    <news:news>
      <news:publication>
        <news:name>24 AI</news:name>
        <news:language>ja</news:language>
      </news:publication>
      <news:publication_date>2026-05-04T00:00:00Z</news:publication_date>
      <news:title>ArXiv ARMOR 2025：519の軍事プロンプトで21の商用LLMの安全性を評価する初の軍事ベンチマーク</news:title>
    </news:news>
  </url>
  <url>
    <loc>https://24-ai.news/ja/news/2026-05-04/arxiv-bayes-consistent-agentic-orchestration-icml/</loc>
    <news:news>
      <news:publication>
        <news:name>24 AI</news:name>
        <news:language>ja</news:language>
      </news:publication>
      <news:publication_date>2026-05-04T00:00:00Z</news:publication_date>
      <news:title>ICML 2026立場論文：30名の著者がエージェントAIのオーケストレーションはベイズ一貫性を持つべきと主張</news:title>
    </news:news>
  </url>
  <url>
    <loc>https://24-ai.news/ja/news/2026-05-04/arxiv-bwla-w1ax-kvantizacija-llm/</loc>
    <news:news>
      <news:publication>
        <news:name>24 AI</news:name>
        <news:language>ja</news:language>
      </news:publication>
      <news:publication_date>2026-05-04T00:00:00Z</news:publication_date>
      <news:title>BWLA：1ビット量子化LLMで3.26倍の高速化と70%の改善を達成（ACL 2026）</news:title>
    </news:news>
  </url>
  <url>
    <loc>https://24-ai.news/ja/news/2026-05-04/arxiv-stable-gflownet-llm-red-teaming-icml/</loc>
    <news:news>
      <news:publication>
        <news:name>24 AI</news:name>
        <news:language>ja</news:language>
      </news:publication>
      <news:publication_date>2026-05-04T00:00:00Z</news:publication_date>
      <news:title>ICML 2026 Spotlight：Stable-GFlowNetがより安定した多様なLLM自動レッドチーミングを実現</news:title>
    </news:news>
  </url>
  <url>
    <loc>https://24-ai.news/ja/news/2026-05-04/arxiv-tool-calling-framework-llm-agenti/</loc>
    <news:news>
      <news:publication>
        <news:name>24 AI</news:name>
        <news:language>ja</news:language>
      </news:publication>
      <news:publication_date>2026-05-04T00:00:00Z</news:publication_date>
      <news:title>ArXivフレームワーク「呼ぶべきか否か」がLLMの外部ツール判断ミスを明らかに</news:title>
    </news:news>
  </url>
  <url>
    <loc>https://24-ai.news/ja/news/2026-05-04/arxiv-tool-use-tax-llm-agenti/</loc>
    <news:news>
      <news:publication>
        <news:name>24 AI</news:name>
        <news:language>ja</news:language>
      </news:publication>
      <news:publication_date>2026-05-04T00:00:00Z</news:publication_date>
      <news:title>ArXiv：LLMエージェントのツールの隠れたコスト——「ツール使用税」はツールが役立つ時でも精度を下げる</news:title>
    </news:news>
  </url>
  <url>
    <loc>https://24-ai.news/ja/news/2026-05-04/ibm-ceo-study-c-suite-ai-restrukturiranje/</loc>
    <news:news>
      <news:publication>
        <news:name>24 AI</news:name>
        <news:language>ja</news:language>
      </news:publication>
      <news:publication_date>2026-05-04T00:00:00Z</news:publication_date>
      <news:title>IBM調査：76%の組織がChief AI Officerを設置、CEOは2030年までにAIが48%の運営決定を自律的に下すと予測</news:title>
    </news:news>
  </url>
  <url>
    <loc>https://24-ai.news/ko/news/2026-05-04/arxiv-adamezo-zeroth-order-llm-finetuning/</loc>
    <news:news>
      <news:publication>
        <news:name>24 AI</news:name>
        <news:language>ko</news:language>
      </news:publication>
      <news:publication_date>2026-05-04T00:00:00Z</news:publication_date>
      <news:title>AdaMeZO: GPU 메모리에 모멘트를 저장하지 않고 Adam 방식으로 LLM 파인튜닝하는 새 최적화기</news:title>
    </news:news>
  </url>
  <url>
    <loc>https://24-ai.news/ko/news/2026-05-04/arxiv-aem-adaptive-entropy-modulation-rl-agenti/</loc>
    <news:news>
      <news:publication>
        <news:name>24 AI</news:name>
        <news:language>ko</news:language>
      </news:publication>
      <news:publication_date>2026-05-04T00:00:00Z</news:publication_date>
      <news:title>ArXiv AEM: 멀티턴 RL 에이전트를 위한 적응형 엔트로피 변조, SWE-bench Verified에서 +1.4% 향상</news:title>
    </news:news>
  </url>
  <url>
    <loc>https://24-ai.news/ko/news/2026-05-04/arxiv-armor-2025-vojni-llm-sigurnost/</loc>
    <news:news>
      <news:publication>
        <news:name>24 AI</news:name>
        <news:language>ko</news:language>
      </news:publication>
      <news:publication_date>2026-05-04T00:00:00Z</news:publication_date>
      <news:title>ArXiv ARMOR 2025: 519개 프롬프트로 21개 상용 LLM의 군사 안전성을 평가한 최초의 군사 벤치마크</news:title>
    </news:news>
  </url>
  <url>
    <loc>https://24-ai.news/ko/news/2026-05-04/arxiv-bayes-consistent-agentic-orchestration-icml/</loc>
    <news:news>
      <news:publication>
        <news:name>24 AI</news:name>
        <news:language>ko</news:language>
      </news:publication>
      <news:publication_date>2026-05-04T00:00:00Z</news:publication_date>
      <news:title>ICML 2026 포지션 페이퍼: 저자 30명, 에이전트 AI 오케스트레이션은 베이즈 일관성을 가져야 한다</news:title>
    </news:news>
  </url>
  <url>
    <loc>https://24-ai.news/ko/news/2026-05-04/arxiv-bwla-w1ax-kvantizacija-llm/</loc>
    <news:news>
      <news:publication>
        <news:name>24 AI</news:name>
        <news:language>ko</news:language>
      </news:publication>
      <news:publication_date>2026-05-04T00:00:00Z</news:publication_date>
      <news:title>BWLA: 1비트 양자화 LLM으로 3.26배 가속 및 70% 향상 달성 (ACL 2026)</news:title>
    </news:news>
  </url>
  <url>
    <loc>https://24-ai.news/ko/news/2026-05-04/arxiv-stable-gflownet-llm-red-teaming-icml/</loc>
    <news:news>
      <news:publication>
        <news:name>24 AI</news:name>
        <news:language>ko</news:language>
      </news:publication>
      <news:publication_date>2026-05-04T00:00:00Z</news:publication_date>
      <news:title>ICML 2026 Spotlight: Stable-GFlowNet, 더 안정적이고 다양한 LLM 자동화 레드팀 테스트 도입</news:title>
    </news:news>
  </url>
  <url>
    <loc>https://24-ai.news/ko/news/2026-05-04/arxiv-tool-calling-framework-llm-agenti/</loc>
    <news:news>
      <news:publication>
        <news:name>24 AI</news:name>
        <news:language>ko</news:language>
      </news:publication>
      <news:publication_date>2026-05-04T00:00:00Z</news:publication_date>
      <news:title>ArXiv 프레임워크 &apos;호출할 것인가 말 것인가&apos;: LLM이 외부 도구 필요성을 잘못 판단한다는 것을 밝혀</news:title>
    </news:news>
  </url>
  <url>
    <loc>https://24-ai.news/ko/news/2026-05-04/arxiv-tool-use-tax-llm-agenti/</loc>
    <news:news>
      <news:publication>
        <news:name>24 AI</news:name>
        <news:language>ko</news:language>
      </news:publication>
      <news:publication_date>2026-05-04T00:00:00Z</news:publication_date>
      <news:title>ArXiv: LLM 에이전트 도구의 숨겨진 비용 - &apos;도구 사용세&apos;는 도구가 도움이 될 때도 정확도를 낮춘다</news:title>
    </news:news>
  </url>
  <url>
    <loc>https://24-ai.news/ko/news/2026-05-04/ibm-ceo-study-c-suite-ai-restrukturiranje/</loc>
    <news:news>
      <news:publication>
        <news:name>24 AI</news:name>
        <news:language>ko</news:language>
      </news:publication>
      <news:publication_date>2026-05-04T00:00:00Z</news:publication_date>
      <news:title>IBM 연구: 조직의 76%가 최고AI책임자를 보유, CEO들은 2030년까지 AI가 운영 결정의 48%를 자율적으로 내릴 것으로 예상</news:title>
    </news:news>
  </url>
</urlset>
