<?xml version="1.0" encoding="UTF-8"?>
<urlset xmlns="http://www.sitemaps.org/schemas/sitemap/0.9"
        xmlns:news="http://www.google.com/schemas/sitemap-news/0.9">
  <url>
    <loc>https://24-ai.news/hr/news/2026-05-07/anthropic-managed-agents-multiagent-beta/</loc>
    <news:news>
      <news:publication>
        <news:name>24 AI</news:name>
        <news:language>hr</news:language>
      </news:publication>
      <news:publication_date>2026-05-07T00:00:00Z</news:publication_date>
      <news:title>Anthropic: Managed Agents dobivaju multiagent sesije, Outcomes, webhooks i vault refresh u javnoj beti</news:title>
    </news:news>
  </url>
  <url>
    <loc>https://24-ai.news/hr/news/2026-05-07/anthropic-spacex-300mw-claude-limits/</loc>
    <news:news>
      <news:publication>
        <news:name>24 AI</news:name>
        <news:language>hr</news:language>
      </news:publication>
      <news:publication_date>2026-05-07T00:00:00Z</news:publication_date>
      <news:title>Anthropic: SpaceX postaje compute partner s 300 MW i dvostrukim Claude Code limitima</news:title>
    </news:news>
  </url>
  <url>
    <loc>https://24-ai.news/hr/news/2026-05-07/arxiv-2605-03195-terminus-4b-terminal/</loc>
    <news:news>
      <news:publication>
        <news:name>24 AI</news:name>
        <news:language>hr</news:language>
      </news:publication>
      <news:publication_date>2026-05-07T00:00:00Z</news:publication_date>
      <news:title>arXiv:2605.03195: Terminus-4B — 4 milijarde parametara za terminal execution izjednačava Claude Opus i GPT-5.3-Codex na SWE-Bench Pro uz ~30 % niže tokene glavnog agenta</news:title>
    </news:news>
  </url>
  <url>
    <loc>https://24-ai.news/hr/news/2026-05-07/arxiv-2605-04012-symptomai-fitbit-clinicians/</loc>
    <news:news>
      <news:publication>
        <news:name>24 AI</news:name>
        <news:language>hr</news:language>
      </news:publication>
      <news:publication_date>2026-05-07T00:00:00Z</news:publication_date>
      <news:title>arXiv:2605.04012: SymptomAI u Fitbit aplikaciji s 13.917 pacijenata nadmašuje nezavisne kliničare u diferencijalnoj dijagnozi</news:title>
    </news:news>
  </url>
  <url>
    <loc>https://24-ai.news/hr/news/2026-05-07/arxiv-2605-04019-red-teaming-llama-scout/</loc>
    <news:news>
      <news:publication>
        <news:name>24 AI</news:name>
        <news:language>hr</news:language>
      </news:publication>
      <news:publication_date>2026-05-07T00:00:00Z</news:publication_date>
      <news:title>arXiv:2605.04019: automatizirani red teaming agent postiže 85 % uspjeha protiv Mete Llama Scout uz 45+ napada i 450+ transformacija</news:title>
    </news:news>
  </url>
  <url>
    <loc>https://24-ai.news/hr/news/2026-05-07/github-agentic-validation-compiler-theory/</loc>
    <news:news>
      <news:publication>
        <news:name>24 AI</news:name>
        <news:language>hr</news:language>
      </news:publication>
      <news:publication_date>2026-05-07T00:00:00Z</news:publication_date>
      <news:title>GitHub: validacija agentskog ponašanja preko dominator analize iz teorije kompilatora postiže 100 % točnost vs 82 % agent self-assessment</news:title>
    </news:news>
  </url>
  <url>
    <loc>https://24-ai.news/hr/news/2026-05-07/nvidia-spectrum-x-mrc-open-standard/</loc>
    <news:news>
      <news:publication>
        <news:name>24 AI</news:name>
        <news:language>hr</news:language>
      </news:publication>
      <news:publication_date>2026-05-07T00:00:00Z</news:publication_date>
      <news:title>NVIDIA: Spectrum-X Multipath Reliable Connection postaje OCP otvoreni standard za gigascale AI mreže</news:title>
    </news:news>
  </url>
  <url>
    <loc>https://24-ai.news/hr/news/2026-05-07/vllm-mooncake-kv-cache-store/</loc>
    <news:news>
      <news:publication>
        <news:name>24 AI</news:name>
        <news:language>hr</news:language>
      </news:publication>
      <news:publication_date>2026-05-07T00:00:00Z</news:publication_date>
      <news:title>vLLM: integracija Mooncake distributed KV cache storea donosi 3,8× veći throughput i 46× nižu P50 TTFT za multi-turn agentske workloade</news:title>
    </news:news>
  </url>
  <url>
    <loc>https://24-ai.news/en/news/2026-05-07/anthropic-managed-agents-multiagent-beta/</loc>
    <news:news>
      <news:publication>
        <news:name>24 AI</news:name>
        <news:language>en</news:language>
      </news:publication>
      <news:publication_date>2026-05-07T00:00:00Z</news:publication_date>
      <news:title>Anthropic: Managed Agents gain multiagent sessions, Outcomes, webhooks and vault refresh in public beta</news:title>
    </news:news>
  </url>
  <url>
    <loc>https://24-ai.news/en/news/2026-05-07/anthropic-spacex-300mw-claude-limits/</loc>
    <news:news>
      <news:publication>
        <news:name>24 AI</news:name>
        <news:language>en</news:language>
      </news:publication>
      <news:publication_date>2026-05-07T00:00:00Z</news:publication_date>
      <news:title>Anthropic: SpaceX becomes compute partner with 300 MW and doubled Claude Code limits</news:title>
    </news:news>
  </url>
  <url>
    <loc>https://24-ai.news/en/news/2026-05-07/arxiv-2605-03195-terminus-4b-terminal/</loc>
    <news:news>
      <news:publication>
        <news:name>24 AI</news:name>
        <news:language>en</news:language>
      </news:publication>
      <news:publication_date>2026-05-07T00:00:00Z</news:publication_date>
      <news:title>arXiv:2605.03195: Terminus-4B — 4 billion parameters for terminal execution matches Claude Opus and GPT-5.3-Codex on SWE-Bench Pro with ~30% fewer main agent tokens</news:title>
    </news:news>
  </url>
  <url>
    <loc>https://24-ai.news/en/news/2026-05-07/arxiv-2605-04012-symptomai-fitbit-clinicians/</loc>
    <news:news>
      <news:publication>
        <news:name>24 AI</news:name>
        <news:language>en</news:language>
      </news:publication>
      <news:publication_date>2026-05-07T00:00:00Z</news:publication_date>
      <news:title>arXiv:2605.04012: SymptomAI in the Fitbit app with 13,917 patients outperforms independent clinicians in differential diagnosis</news:title>
    </news:news>
  </url>
  <url>
    <loc>https://24-ai.news/en/news/2026-05-07/arxiv-2605-04019-red-teaming-llama-scout/</loc>
    <news:news>
      <news:publication>
        <news:name>24 AI</news:name>
        <news:language>en</news:language>
      </news:publication>
      <news:publication_date>2026-05-07T00:00:00Z</news:publication_date>
      <news:title>arXiv:2605.04019: automated red teaming agent achieves 85% success rate against Meta Llama Scout with 45+ attacks and 450+ transformations</news:title>
    </news:news>
  </url>
  <url>
    <loc>https://24-ai.news/en/news/2026-05-07/github-agentic-validation-compiler-theory/</loc>
    <news:news>
      <news:publication>
        <news:name>24 AI</news:name>
        <news:language>en</news:language>
      </news:publication>
      <news:publication_date>2026-05-07T00:00:00Z</news:publication_date>
      <news:title>GitHub: validation of agentic behavior via dominator analysis from compiler theory achieves 100% accuracy vs 82% agent self-assessment</news:title>
    </news:news>
  </url>
  <url>
    <loc>https://24-ai.news/en/news/2026-05-07/nvidia-spectrum-x-mrc-open-standard/</loc>
    <news:news>
      <news:publication>
        <news:name>24 AI</news:name>
        <news:language>en</news:language>
      </news:publication>
      <news:publication_date>2026-05-07T00:00:00Z</news:publication_date>
      <news:title>NVIDIA: Spectrum-X Multipath Reliable Connection becomes OCP open standard for gigascale AI networks</news:title>
    </news:news>
  </url>
  <url>
    <loc>https://24-ai.news/en/news/2026-05-07/vllm-mooncake-kv-cache-store/</loc>
    <news:news>
      <news:publication>
        <news:name>24 AI</news:name>
        <news:language>en</news:language>
      </news:publication>
      <news:publication_date>2026-05-07T00:00:00Z</news:publication_date>
      <news:title>vLLM: Mooncake distributed KV cache store integration delivers 3.8× higher throughput and 46× lower P50 TTFT for multi-turn agentic workloads</news:title>
    </news:news>
  </url>
  <url>
    <loc>https://24-ai.news/de/news/2026-05-07/anthropic-managed-agents-multiagent-beta/</loc>
    <news:news>
      <news:publication>
        <news:name>24 AI</news:name>
        <news:language>de</news:language>
      </news:publication>
      <news:publication_date>2026-05-07T00:00:00Z</news:publication_date>
      <news:title>Anthropic: Managed Agents erhalten Multiagenten-Sessions, Outcomes, Webhooks und Vault-Refresh in der öffentlichen Beta</news:title>
    </news:news>
  </url>
  <url>
    <loc>https://24-ai.news/de/news/2026-05-07/anthropic-spacex-300mw-claude-limits/</loc>
    <news:news>
      <news:publication>
        <news:name>24 AI</news:name>
        <news:language>de</news:language>
      </news:publication>
      <news:publication_date>2026-05-07T00:00:00Z</news:publication_date>
      <news:title>Anthropic: SpaceX wird Compute-Partner mit 300 MW und verdoppelten Claude Code-Limits</news:title>
    </news:news>
  </url>
  <url>
    <loc>https://24-ai.news/de/news/2026-05-07/arxiv-2605-03195-terminus-4b-terminal/</loc>
    <news:news>
      <news:publication>
        <news:name>24 AI</news:name>
        <news:language>de</news:language>
      </news:publication>
      <news:publication_date>2026-05-07T00:00:00Z</news:publication_date>
      <news:title>arXiv:2605.03195: Terminus-4B — 4 Milliarden Parameter für Terminal-Execution auf Augenhöhe mit Claude Opus und GPT-5.3-Codex bei SWE-Bench Pro mit ~30 % weniger Haupt-Agent-Token</news:title>
    </news:news>
  </url>
  <url>
    <loc>https://24-ai.news/de/news/2026-05-07/arxiv-2605-04012-symptomai-fitbit-clinicians/</loc>
    <news:news>
      <news:publication>
        <news:name>24 AI</news:name>
        <news:language>de</news:language>
      </news:publication>
      <news:publication_date>2026-05-07T00:00:00Z</news:publication_date>
      <news:title>arXiv:2605.04012: SymptomAI in der Fitbit-App übertrifft mit 13.917 Patienten unabhängige Kliniker bei der Differentialdiagnose</news:title>
    </news:news>
  </url>
  <url>
    <loc>https://24-ai.news/de/news/2026-05-07/arxiv-2605-04019-red-teaming-llama-scout/</loc>
    <news:news>
      <news:publication>
        <news:name>24 AI</news:name>
        <news:language>de</news:language>
      </news:publication>
      <news:publication_date>2026-05-07T00:00:00Z</news:publication_date>
      <news:title>arXiv:2605.04019: Automatisierter Red-Teaming-Agent erreicht 85 % Erfolgsquote gegen Metas Llama Scout mit 45+ Angriffen und 450+ Transformationen</news:title>
    </news:news>
  </url>
  <url>
    <loc>https://24-ai.news/de/news/2026-05-07/github-agentic-validation-compiler-theory/</loc>
    <news:news>
      <news:publication>
        <news:name>24 AI</news:name>
        <news:language>de</news:language>
      </news:publication>
      <news:publication_date>2026-05-07T00:00:00Z</news:publication_date>
      <news:title>GitHub: Validierung agentischen Verhaltens per Dominatoranalyse aus der Compilertheorie erreicht 100 % Genauigkeit vs. 82 % Agenten-Selbstbeurteilung</news:title>
    </news:news>
  </url>
  <url>
    <loc>https://24-ai.news/de/news/2026-05-07/nvidia-spectrum-x-mrc-open-standard/</loc>
    <news:news>
      <news:publication>
        <news:name>24 AI</news:name>
        <news:language>de</news:language>
      </news:publication>
      <news:publication_date>2026-05-07T00:00:00Z</news:publication_date>
      <news:title>NVIDIA: Spectrum-X Multipath Reliable Connection wird OCP-Offenstandard für Gigascale-KI-Netzwerke</news:title>
    </news:news>
  </url>
  <url>
    <loc>https://24-ai.news/de/news/2026-05-07/vllm-mooncake-kv-cache-store/</loc>
    <news:news>
      <news:publication>
        <news:name>24 AI</news:name>
        <news:language>de</news:language>
      </news:publication>
      <news:publication_date>2026-05-07T00:00:00Z</news:publication_date>
      <news:title>vLLM: Mooncake Distributed KV-Cache-Store-Integration liefert 3,8× höheren Durchsatz und 46× niedrigere P50 TTFT für Multi-Turn-Agenten-Workloads</news:title>
    </news:news>
  </url>
  <url>
    <loc>https://24-ai.news/zh/news/2026-05-07/anthropic-managed-agents-multiagent-beta/</loc>
    <news:news>
      <news:publication>
        <news:name>24 AI</news:name>
        <news:language>zh-cn</news:language>
      </news:publication>
      <news:publication_date>2026-05-07T00:00:00Z</news:publication_date>
      <news:title>Anthropic：Managed Agents在公开测试版中获得多智能体会话、Outcomes、Webhooks和Vault刷新功能</news:title>
    </news:news>
  </url>
  <url>
    <loc>https://24-ai.news/zh/news/2026-05-07/anthropic-spacex-300mw-claude-limits/</loc>
    <news:news>
      <news:publication>
        <news:name>24 AI</news:name>
        <news:language>zh-cn</news:language>
      </news:publication>
      <news:publication_date>2026-05-07T00:00:00Z</news:publication_date>
      <news:title>Anthropic：SpaceX成为算力合作伙伴，提供300 MW并将Claude Code速率限制翻倍</news:title>
    </news:news>
  </url>
  <url>
    <loc>https://24-ai.news/zh/news/2026-05-07/arxiv-2605-03195-terminus-4b-terminal/</loc>
    <news:news>
      <news:publication>
        <news:name>24 AI</news:name>
        <news:language>zh-cn</news:language>
      </news:publication>
      <news:publication_date>2026-05-07T00:00:00Z</news:publication_date>
      <news:title>arXiv:2605.03195: Terminus-4B——40亿参数终端执行模型在SWE-Bench Pro上与Claude Opus和GPT-5.3-Codex持平，主智能体Token消耗降低约30%</news:title>
    </news:news>
  </url>
  <url>
    <loc>https://24-ai.news/zh/news/2026-05-07/arxiv-2605-04012-symptomai-fitbit-clinicians/</loc>
    <news:news>
      <news:publication>
        <news:name>24 AI</news:name>
        <news:language>zh-cn</news:language>
      </news:publication>
      <news:publication_date>2026-05-07T00:00:00Z</news:publication_date>
      <news:title>arXiv:2605.04012: SymptomAI在Fitbit应用中以约13,917名患者为样本，鉴别诊断准确性优于独立临床医生</news:title>
    </news:news>
  </url>
  <url>
    <loc>https://24-ai.news/zh/news/2026-05-07/arxiv-2605-04019-red-teaming-llama-scout/</loc>
    <news:news>
      <news:publication>
        <news:name>24 AI</news:name>
        <news:language>zh-cn</news:language>
      </news:publication>
      <news:publication_date>2026-05-07T00:00:00Z</news:publication_date>
      <news:title>arXiv:2605.04019: 自动化红队测试智能体对Meta Llama Scout攻击成功率达85%，含45+种攻击和450+种变换</news:title>
    </news:news>
  </url>
  <url>
    <loc>https://24-ai.news/zh/news/2026-05-07/github-agentic-validation-compiler-theory/</loc>
    <news:news>
      <news:publication>
        <news:name>24 AI</news:name>
        <news:language>zh-cn</news:language>
      </news:publication>
      <news:publication_date>2026-05-07T00:00:00Z</news:publication_date>
      <news:title>GitHub：借助编译器理论的支配节点分析验证智能体行为，准确率达100%，优于智能体自评估的82%</news:title>
    </news:news>
  </url>
  <url>
    <loc>https://24-ai.news/zh/news/2026-05-07/nvidia-spectrum-x-mrc-open-standard/</loc>
    <news:news>
      <news:publication>
        <news:name>24 AI</news:name>
        <news:language>zh-cn</news:language>
      </news:publication>
      <news:publication_date>2026-05-07T00:00:00Z</news:publication_date>
      <news:title>NVIDIA：Spectrum-X多路径可靠连接成为OCP开放标准，面向超大规模AI网络</news:title>
    </news:news>
  </url>
  <url>
    <loc>https://24-ai.news/zh/news/2026-05-07/vllm-mooncake-kv-cache-store/</loc>
    <news:news>
      <news:publication>
        <news:name>24 AI</news:name>
        <news:language>zh-cn</news:language>
      </news:publication>
      <news:publication_date>2026-05-07T00:00:00Z</news:publication_date>
      <news:title>vLLM：集成Mooncake分布式KV缓存存储，多轮智能体工作负载吞吐量提升3.8倍、P50首token时延降低46倍</news:title>
    </news:news>
  </url>
  <url>
    <loc>https://24-ai.news/ja/news/2026-05-07/anthropic-managed-agents-multiagent-beta/</loc>
    <news:news>
      <news:publication>
        <news:name>24 AI</news:name>
        <news:language>ja</news:language>
      </news:publication>
      <news:publication_date>2026-05-07T00:00:00Z</news:publication_date>
      <news:title>Anthropic：Managed Agentsがマルチエージェントセッション、Outcomes、Webhooks、Vault更新をパブリックベータで追加</news:title>
    </news:news>
  </url>
  <url>
    <loc>https://24-ai.news/ja/news/2026-05-07/anthropic-spacex-300mw-claude-limits/</loc>
    <news:news>
      <news:publication>
        <news:name>24 AI</news:name>
        <news:language>ja</news:language>
      </news:publication>
      <news:publication_date>2026-05-07T00:00:00Z</news:publication_date>
      <news:title>Anthropic：SpaceXをコンピュートパートナーに迎え、300 MWの算力とClaude Codeのレート制限倍増を発表</news:title>
    </news:news>
  </url>
  <url>
    <loc>https://24-ai.news/ja/news/2026-05-07/arxiv-2605-03195-terminus-4b-terminal/</loc>
    <news:news>
      <news:publication>
        <news:name>24 AI</news:name>
        <news:language>ja</news:language>
      </news:publication>
      <news:publication_date>2026-05-07T00:00:00Z</news:publication_date>
      <news:title>arXiv:2605.03195: Terminus-4B——40億パラメータのターミナル実行モデルがSWE-Bench ProでClaude OpusとGPT-5.3-Codexに匹敵し、メインエージェントのトークン使用量を約30%削減</news:title>
    </news:news>
  </url>
  <url>
    <loc>https://24-ai.news/ja/news/2026-05-07/arxiv-2605-04012-symptomai-fitbit-clinicians/</loc>
    <news:news>
      <news:publication>
        <news:name>24 AI</news:name>
        <news:language>ja</news:language>
      </news:publication>
      <news:publication_date>2026-05-07T00:00:00Z</news:publication_date>
      <news:title>arXiv:2605.04012: SymptomAIがFitbitアプリで約13,917名の患者を対象に鑑別診断で独立した臨床医を上回る</news:title>
    </news:news>
  </url>
  <url>
    <loc>https://24-ai.news/ja/news/2026-05-07/arxiv-2605-04019-red-teaming-llama-scout/</loc>
    <news:news>
      <news:publication>
        <news:name>24 AI</news:name>
        <news:language>ja</news:language>
      </news:publication>
      <news:publication_date>2026-05-07T00:00:00Z</news:publication_date>
      <news:title>arXiv:2605.04019: 自動化レッドチームエージェントがMeta Llama Scoutに対して85%の成功率を達成、45以上の攻撃と450以上の変換を使用</news:title>
    </news:news>
  </url>
  <url>
    <loc>https://24-ai.news/ja/news/2026-05-07/github-agentic-validation-compiler-theory/</loc>
    <news:news>
      <news:publication>
        <news:name>24 AI</news:name>
        <news:language>ja</news:language>
      </news:publication>
      <news:publication_date>2026-05-07T00:00:00Z</news:publication_date>
      <news:title>GitHub：コンパイラ理論の支配節点解析でエージェント動作を検証、精度100% vs エージェント自己評価82%</news:title>
    </news:news>
  </url>
  <url>
    <loc>https://24-ai.news/ja/news/2026-05-07/nvidia-spectrum-x-mrc-open-standard/</loc>
    <news:news>
      <news:publication>
        <news:name>24 AI</news:name>
        <news:language>ja</news:language>
      </news:publication>
      <news:publication_date>2026-05-07T00:00:00Z</news:publication_date>
      <news:title>NVIDIA：Spectrum-X マルチパス信頼性接続がOCPオープン標準となり、ギガスケールAIネットワークへ</news:title>
    </news:news>
  </url>
  <url>
    <loc>https://24-ai.news/ja/news/2026-05-07/vllm-mooncake-kv-cache-store/</loc>
    <news:news>
      <news:publication>
        <news:name>24 AI</news:name>
        <news:language>ja</news:language>
      </news:publication>
      <news:publication_date>2026-05-07T00:00:00Z</news:publication_date>
      <news:title>vLLM：Mooncake分散KVキャッシュストアの統合でマルチターンエージェントのスループット3.8倍、P50 TTFT 46倍改善</news:title>
    </news:news>
  </url>
  <url>
    <loc>https://24-ai.news/ko/news/2026-05-07/anthropic-managed-agents-multiagent-beta/</loc>
    <news:news>
      <news:publication>
        <news:name>24 AI</news:name>
        <news:language>ko</news:language>
      </news:publication>
      <news:publication_date>2026-05-07T00:00:00Z</news:publication_date>
      <news:title>Anthropic: Managed Agents, 멀티에이전트 세션·Outcomes·Webhooks·Vault 갱신 기능을 퍼블릭 베타로 추가</news:title>
    </news:news>
  </url>
  <url>
    <loc>https://24-ai.news/ko/news/2026-05-07/anthropic-spacex-300mw-claude-limits/</loc>
    <news:news>
      <news:publication>
        <news:name>24 AI</news:name>
        <news:language>ko</news:language>
      </news:publication>
      <news:publication_date>2026-05-07T00:00:00Z</news:publication_date>
      <news:title>Anthropic: SpaceX를 컴퓨트 파트너로 영입, 300 MW 용량 확보 및 Claude Code 속도 제한 두 배 확대</news:title>
    </news:news>
  </url>
  <url>
    <loc>https://24-ai.news/ko/news/2026-05-07/arxiv-2605-03195-terminus-4b-terminal/</loc>
    <news:news>
      <news:publication>
        <news:name>24 AI</news:name>
        <news:language>ko</news:language>
      </news:publication>
      <news:publication_date>2026-05-07T00:00:00Z</news:publication_date>
      <news:title>arXiv:2605.03195: Terminus-4B — 40억 파라미터 터미널 실행 모델이 SWE-Bench Pro에서 Claude Opus·GPT-5.3-Codex와 동등, 주 에이전트 토큰 약 30% 절감</news:title>
    </news:news>
  </url>
  <url>
    <loc>https://24-ai.news/ko/news/2026-05-07/arxiv-2605-04012-symptomai-fitbit-clinicians/</loc>
    <news:news>
      <news:publication>
        <news:name>24 AI</news:name>
        <news:language>ko</news:language>
      </news:publication>
      <news:publication_date>2026-05-07T00:00:00Z</news:publication_date>
      <news:title>arXiv:2605.04012: SymptomAI가 Fitbit 앱에서 약 13,917명 환자 대상으로 감별 진단에서 독립 임상의를 능가</news:title>
    </news:news>
  </url>
  <url>
    <loc>https://24-ai.news/ko/news/2026-05-07/arxiv-2605-04019-red-teaming-llama-scout/</loc>
    <news:news>
      <news:publication>
        <news:name>24 AI</news:name>
        <news:language>ko</news:language>
      </news:publication>
      <news:publication_date>2026-05-07T00:00:00Z</news:publication_date>
      <news:title>arXiv:2605.04019: 자동화 레드팀 에이전트가 Meta Llama Scout 대상 85% 성공률 달성, 공격 45종·변환 450종 이상 사용</news:title>
    </news:news>
  </url>
  <url>
    <loc>https://24-ai.news/ko/news/2026-05-07/github-agentic-validation-compiler-theory/</loc>
    <news:news>
      <news:publication>
        <news:name>24 AI</news:name>
        <news:language>ko</news:language>
      </news:publication>
      <news:publication_date>2026-05-07T00:00:00Z</news:publication_date>
      <news:title>GitHub: 컴파일러 이론의 지배자 분석으로 에이전트 동작 검증, 정확도 100% vs 에이전트 자기 평가 82%</news:title>
    </news:news>
  </url>
  <url>
    <loc>https://24-ai.news/ko/news/2026-05-07/nvidia-spectrum-x-mrc-open-standard/</loc>
    <news:news>
      <news:publication>
        <news:name>24 AI</news:name>
        <news:language>ko</news:language>
      </news:publication>
      <news:publication_date>2026-05-07T00:00:00Z</news:publication_date>
      <news:title>NVIDIA: Spectrum-X 다중 경로 신뢰 연결이 기가급 AI 네트워크를 위한 OCP 개방 표준으로 채택</news:title>
    </news:news>
  </url>
  <url>
    <loc>https://24-ai.news/ko/news/2026-05-07/vllm-mooncake-kv-cache-store/</loc>
    <news:news>
      <news:publication>
        <news:name>24 AI</news:name>
        <news:language>ko</news:language>
      </news:publication>
      <news:publication_date>2026-05-07T00:00:00Z</news:publication_date>
      <news:title>vLLM: Mooncake 분산 KV 캐시 스토어 통합으로 멀티턴 에이전트 처리량 3.8배, P50 TTFT 46배 개선</news:title>
    </news:news>
  </url>
  <url>
    <loc>https://24-ai.news/hr/news/2026-05-06/ai2-molmoact-2-open-robotics-foundation/</loc>
    <news:news>
      <news:publication>
        <news:name>24 AI</news:name>
        <news:language>hr</news:language>
      </news:publication>
      <news:publication_date>2026-05-06T00:00:00Z</news:publication_date>
      <news:title>Allen Institute: MolmoAct 2 je prvi open-source robotics foundation model koji nadmašuje GPT-5 i Gemini 2.5 Pro</news:title>
    </news:news>
  </url>
  <url>
    <loc>https://24-ai.news/hr/news/2026-05-06/amd-farskip-collective-moe-inference/</loc>
    <news:news>
      <news:publication>
        <news:name>24 AI</news:name>
        <news:language>hr</news:language>
      </news:publication>
      <news:publication_date>2026-05-06T00:00:00Z</news:publication_date>
      <news:title>AMD: FarSkip-Collective ubrzava MoE inferenciju 18-34 % na AMD GPU-ima</news:title>
    </news:news>
  </url>
  <url>
    <loc>https://24-ai.news/hr/news/2026-05-06/anthropic-claude-code-v2-1-131-hotfix/</loc>
    <news:news>
      <news:publication>
        <news:name>24 AI</news:name>
        <news:language>hr</news:language>
      </news:publication>
      <news:publication_date>2026-05-06T00:00:00Z</news:publication_date>
      <news:title>Anthropic: Claude Code v2.1.131 — Windows VS Code aktivacija i Mantle x-api-key hotfix</news:title>
    </news:news>
  </url>
  <url>
    <loc>https://24-ai.news/hr/news/2026-05-06/anthropic-finance-agents-templates/</loc>
    <news:news>
      <news:publication>
        <news:name>24 AI</news:name>
        <news:language>hr</news:language>
      </news:publication>
      <news:publication_date>2026-05-06T00:00:00Z</news:publication_date>
      <news:title>Anthropic: 10 gotovih financial-services agent templatea + Claude Opus 4.7 64,37 % na Vals AI Finance benchmarku</news:title>
    </news:news>
  </url>
  <url>
    <loc>https://24-ai.news/hr/news/2026-05-06/arxiv-2605-02503-dataclaw-eda-benchmark/</loc>
    <news:news>
      <news:publication>
        <news:name>24 AI</news:name>
        <news:language>hr</news:language>
      </news:publication>
      <news:publication_date>2026-05-06T00:00:00Z</news:publication_date>
      <news:title>arXiv:2605.02503: DataClaw — process-level benchmark mjeri kvalitetu procesa AI agenata u eksplorativnoj data analizi</news:title>
    </news:news>
  </url>
  <url>
    <loc>https://24-ai.news/hr/news/2026-05-06/arxiv-2605-03675-memtier-tiered-agent-memory/</loc>
    <news:news>
      <news:publication>
        <news:name>24 AI</news:name>
        <news:language>hr</news:language>
      </news:publication>
      <news:publication_date>2026-05-06T00:00:00Z</news:publication_date>
      <news:title>arXiv:2605.03675: MEMTIER — višeslojna memorija dugoročnim agentima vraća pamćenje</news:title>
    </news:news>
  </url>
  <url>
    <loc>https://24-ai.news/hr/news/2026-05-06/arxiv-2605-03871-evolm-self-improving-llms/</loc>
    <news:news>
      <news:publication>
        <news:name>24 AI</news:name>
        <news:language>hr</news:language>
      </news:publication>
      <news:publication_date>2026-05-06T00:00:00Z</news:publication_date>
      <news:title>arXiv:2605.03871: EvoLM — jezični modeli koji se sami poboljšavaju bez vanjske supervizije</news:title>
    </news:news>
  </url>
  <url>
    <loc>https://24-ai.news/hr/news/2026-05-06/arxiv-2605-04039-clinical-llm-safety-scaling/</loc>
    <news:news>
      <news:publication>
        <news:name>24 AI</news:name>
        <news:language>hr</news:language>
      </news:publication>
      <news:publication_date>2026-05-06T00:00:00Z</news:publication_date>
      <news:title>arXiv:2605.04039: Sigurnost i točnost kliničkih LLM-ova slijede različite zakone skaliranja</news:title>
    </news:news>
  </url>
  <url>
    <loc>https://24-ai.news/hr/news/2026-05-06/aws-agentcore-browser-os-level-actions/</loc>
    <news:news>
      <news:publication>
        <news:name>24 AI</news:name>
        <news:language>hr</news:language>
      </news:publication>
      <news:publication_date>2026-05-06T00:00:00Z</news:publication_date>
      <news:title>AWS: AgentCore Browser dobiva OS-level akcije — 8 novih primitivki</news:title>
    </news:news>
  </url>
  <url>
    <loc>https://24-ai.news/hr/news/2026-05-06/cncf-cloud-native-observability-survey-2026/</loc>
    <news:news>
      <news:publication>
        <news:name>24 AI</news:name>
        <news:language>hr</news:language>
      </news:publication>
      <news:publication_date>2026-05-06T00:00:00Z</news:publication_date>
      <news:title>CNCF: 46,7 % cloud-native timova još pokreće 2-3 paralelna observability stacka</news:title>
    </news:news>
  </url>
  <url>
    <loc>https://24-ai.news/hr/news/2026-05-06/github-secret-scanning-mcp-server-ga/</loc>
    <news:news>
      <news:publication>
        <news:name>24 AI</news:name>
        <news:language>hr</news:language>
      </news:publication>
      <news:publication_date>2026-05-06T00:00:00Z</news:publication_date>
      <news:title>GitHub: Secret scanning kroz MCP server u GA — AI agenti detektiraju credentiale prije commita</news:title>
    </news:news>
  </url>
  <url>
    <loc>https://24-ai.news/hr/news/2026-05-06/google-gemini-file-search-multimodal/</loc>
    <news:news>
      <news:publication>
        <news:name>24 AI</news:name>
        <news:language>hr</news:language>
      </news:publication>
      <news:publication_date>2026-05-06T00:00:00Z</news:publication_date>
      <news:title>Google: Gemini API File Search proširen na multimodalnu pretragu slika i teksta</news:title>
    </news:news>
  </url>
  <url>
    <loc>https://24-ai.news/hr/news/2026-05-06/ibm-enterprise-advantage-context-studio/</loc>
    <news:news>
      <news:publication>
        <news:name>24 AI</news:name>
        <news:language>hr</news:language>
      </news:publication>
      <news:publication_date>2026-05-06T00:00:00Z</news:publication_date>
      <news:title>IBM: Enterprise Advantage dobiva Context Studio — Providence Health smanjio menadžersko vrijeme zapošljavanja 90 %</news:title>
    </news:news>
  </url>
  <url>
    <loc>https://24-ai.news/hr/news/2026-05-06/microsoft-droidspeak-kv-cache-sharing/</loc>
    <news:news>
      <news:publication>
        <news:name>24 AI</news:name>
        <news:language>hr</news:language>
      </news:publication>
      <news:publication_date>2026-05-06T00:00:00Z</news:publication_date>
      <news:title>Microsoft Research: DroidSpeak dijeli KV cache između fine-tuned LLM varijanti za 4× veći throughput</news:title>
    </news:news>
  </url>
  <url>
    <loc>https://24-ai.news/hr/news/2026-05-06/openai-gpt-55-instant-default-model/</loc>
    <news:news>
      <news:publication>
        <news:name>24 AI</news:name>
        <news:language>hr</news:language>
      </news:publication>
      <news:publication_date>2026-05-06T00:00:00Z</news:publication_date>
      <news:title>OpenAI: GPT-5.5 Instant postaje novi default model u ChatGPT-u s manje halucinacija</news:title>
    </news:news>
  </url>
  <url>
    <loc>https://24-ai.news/hr/news/2026-05-06/uk-aisi-microsoft-frontier-ai-partnership/</loc>
    <news:news>
      <news:publication>
        <news:name>24 AI</news:name>
        <news:language>hr</news:language>
      </news:publication>
      <news:publication_date>2026-05-06T00:00:00Z</news:publication_date>
      <news:title>UK AISI: novi MoU s Microsoftom za frontier-AI sigurnost u 3 područja</news:title>
    </news:news>
  </url>
  <url>
    <loc>https://24-ai.news/en/news/2026-05-06/ai2-molmoact-2-open-robotics-foundation/</loc>
    <news:news>
      <news:publication>
        <news:name>24 AI</news:name>
        <news:language>en</news:language>
      </news:publication>
      <news:publication_date>2026-05-06T00:00:00Z</news:publication_date>
      <news:title>Allen Institute: MolmoAct 2 is the first open-source robotics foundation model to outperform GPT-5 and Gemini 2.5 Pro</news:title>
    </news:news>
  </url>
  <url>
    <loc>https://24-ai.news/en/news/2026-05-06/amd-farskip-collective-moe-inference/</loc>
    <news:news>
      <news:publication>
        <news:name>24 AI</news:name>
        <news:language>en</news:language>
      </news:publication>
      <news:publication_date>2026-05-06T00:00:00Z</news:publication_date>
      <news:title>AMD: FarSkip-Collective speeds up MoE inference by 18–34% on AMD GPUs</news:title>
    </news:news>
  </url>
  <url>
    <loc>https://24-ai.news/en/news/2026-05-06/anthropic-claude-code-v2-1-131-hotfix/</loc>
    <news:news>
      <news:publication>
        <news:name>24 AI</news:name>
        <news:language>en</news:language>
      </news:publication>
      <news:publication_date>2026-05-06T00:00:00Z</news:publication_date>
      <news:title>Anthropic: Claude Code v2.1.131 — Windows VS Code activation and Mantle x-api-key hotfix</news:title>
    </news:news>
  </url>
  <url>
    <loc>https://24-ai.news/en/news/2026-05-06/anthropic-finance-agents-templates/</loc>
    <news:news>
      <news:publication>
        <news:name>24 AI</news:name>
        <news:language>en</news:language>
      </news:publication>
      <news:publication_date>2026-05-06T00:00:00Z</news:publication_date>
      <news:title>Anthropic: 10 ready-made financial-services agent templates + Claude Opus 4.7 at 64.37% on Vals AI Finance benchmark</news:title>
    </news:news>
  </url>
  <url>
    <loc>https://24-ai.news/en/news/2026-05-06/arxiv-2605-02503-dataclaw-eda-benchmark/</loc>
    <news:news>
      <news:publication>
        <news:name>24 AI</news:name>
        <news:language>en</news:language>
      </news:publication>
      <news:publication_date>2026-05-06T00:00:00Z</news:publication_date>
      <news:title>arXiv:2605.02503: DataClaw — process-level benchmark measures the quality of AI agent workflows in exploratory data analysis</news:title>
    </news:news>
  </url>
  <url>
    <loc>https://24-ai.news/en/news/2026-05-06/arxiv-2605-03675-memtier-tiered-agent-memory/</loc>
    <news:news>
      <news:publication>
        <news:name>24 AI</news:name>
        <news:language>en</news:language>
      </news:publication>
      <news:publication_date>2026-05-06T00:00:00Z</news:publication_date>
      <news:title>arXiv:2605.03675: MEMTIER — tiered memory architecture restores recall for long-running agents</news:title>
    </news:news>
  </url>
  <url>
    <loc>https://24-ai.news/en/news/2026-05-06/arxiv-2605-03871-evolm-self-improving-llms/</loc>
    <news:news>
      <news:publication>
        <news:name>24 AI</news:name>
        <news:language>en</news:language>
      </news:publication>
      <news:publication_date>2026-05-06T00:00:00Z</news:publication_date>
      <news:title>arXiv:2605.03871: EvoLM — language models that improve themselves without external supervision</news:title>
    </news:news>
  </url>
  <url>
    <loc>https://24-ai.news/en/news/2026-05-06/arxiv-2605-04039-clinical-llm-safety-scaling/</loc>
    <news:news>
      <news:publication>
        <news:name>24 AI</news:name>
        <news:language>en</news:language>
      </news:publication>
      <news:publication_date>2026-05-06T00:00:00Z</news:publication_date>
      <news:title>arXiv:2605.04039: Safety and accuracy in clinical LLMs follow different scaling laws</news:title>
    </news:news>
  </url>
  <url>
    <loc>https://24-ai.news/en/news/2026-05-06/aws-agentcore-browser-os-level-actions/</loc>
    <news:news>
      <news:publication>
        <news:name>24 AI</news:name>
        <news:language>en</news:language>
      </news:publication>
      <news:publication_date>2026-05-06T00:00:00Z</news:publication_date>
      <news:title>AWS: AgentCore Browser gains OS-level actions — 8 new primitives</news:title>
    </news:news>
  </url>
  <url>
    <loc>https://24-ai.news/en/news/2026-05-06/cncf-cloud-native-observability-survey-2026/</loc>
    <news:news>
      <news:publication>
        <news:name>24 AI</news:name>
        <news:language>en</news:language>
      </news:publication>
      <news:publication_date>2026-05-06T00:00:00Z</news:publication_date>
      <news:title>CNCF: 46.7% of cloud-native teams still run 2–3 parallel observability stacks</news:title>
    </news:news>
  </url>
  <url>
    <loc>https://24-ai.news/en/news/2026-05-06/github-secret-scanning-mcp-server-ga/</loc>
    <news:news>
      <news:publication>
        <news:name>24 AI</news:name>
        <news:language>en</news:language>
      </news:publication>
      <news:publication_date>2026-05-06T00:00:00Z</news:publication_date>
      <news:title>GitHub: Secret scanning via MCP server reaches GA — AI agents detect credentials before commit</news:title>
    </news:news>
  </url>
  <url>
    <loc>https://24-ai.news/en/news/2026-05-06/google-gemini-file-search-multimodal/</loc>
    <news:news>
      <news:publication>
        <news:name>24 AI</news:name>
        <news:language>en</news:language>
      </news:publication>
      <news:publication_date>2026-05-06T00:00:00Z</news:publication_date>
      <news:title>Google: Gemini API File Search expanded to multimodal image and text search</news:title>
    </news:news>
  </url>
  <url>
    <loc>https://24-ai.news/en/news/2026-05-06/ibm-enterprise-advantage-context-studio/</loc>
    <news:news>
      <news:publication>
        <news:name>24 AI</news:name>
        <news:language>en</news:language>
      </news:publication>
      <news:publication_date>2026-05-06T00:00:00Z</news:publication_date>
      <news:title>IBM: Enterprise Advantage gets Context Studio — Providence Health cut manager hiring time by 90%</news:title>
    </news:news>
  </url>
  <url>
    <loc>https://24-ai.news/en/news/2026-05-06/microsoft-droidspeak-kv-cache-sharing/</loc>
    <news:news>
      <news:publication>
        <news:name>24 AI</news:name>
        <news:language>en</news:language>
      </news:publication>
      <news:publication_date>2026-05-06T00:00:00Z</news:publication_date>
      <news:title>Microsoft Research: DroidSpeak shares KV cache across fine-tuned LLM variants for 4× higher throughput</news:title>
    </news:news>
  </url>
  <url>
    <loc>https://24-ai.news/en/news/2026-05-06/openai-gpt-55-instant-default-model/</loc>
    <news:news>
      <news:publication>
        <news:name>24 AI</news:name>
        <news:language>en</news:language>
      </news:publication>
      <news:publication_date>2026-05-06T00:00:00Z</news:publication_date>
      <news:title>OpenAI: GPT-5.5 Instant becomes the new default ChatGPT model with fewer hallucinations</news:title>
    </news:news>
  </url>
  <url>
    <loc>https://24-ai.news/en/news/2026-05-06/uk-aisi-microsoft-frontier-ai-partnership/</loc>
    <news:news>
      <news:publication>
        <news:name>24 AI</news:name>
        <news:language>en</news:language>
      </news:publication>
      <news:publication_date>2026-05-06T00:00:00Z</news:publication_date>
      <news:title>UK AISI: new MoU with Microsoft for frontier AI safety across 3 research areas</news:title>
    </news:news>
  </url>
  <url>
    <loc>https://24-ai.news/de/news/2026-05-06/ai2-molmoact-2-open-robotics-foundation/</loc>
    <news:news>
      <news:publication>
        <news:name>24 AI</news:name>
        <news:language>de</news:language>
      </news:publication>
      <news:publication_date>2026-05-06T00:00:00Z</news:publication_date>
      <news:title>Allen Institute: MolmoAct 2 ist das erste Open-Source-Robotik-Foundation-Modell, das GPT-5 und Gemini 2.5 Pro übertrifft</news:title>
    </news:news>
  </url>
  <url>
    <loc>https://24-ai.news/de/news/2026-05-06/amd-farskip-collective-moe-inference/</loc>
    <news:news>
      <news:publication>
        <news:name>24 AI</news:name>
        <news:language>de</news:language>
      </news:publication>
      <news:publication_date>2026-05-06T00:00:00Z</news:publication_date>
      <news:title>AMD: FarSkip-Collective beschleunigt MoE-Inferenz um 18–34 % auf AMD-GPUs</news:title>
    </news:news>
  </url>
  <url>
    <loc>https://24-ai.news/de/news/2026-05-06/anthropic-claude-code-v2-1-131-hotfix/</loc>
    <news:news>
      <news:publication>
        <news:name>24 AI</news:name>
        <news:language>de</news:language>
      </news:publication>
      <news:publication_date>2026-05-06T00:00:00Z</news:publication_date>
      <news:title>Anthropic: Claude Code v2.1.131 — Windows-VS-Code-Aktivierung und Mantle-x-api-key-Hotfix</news:title>
    </news:news>
  </url>
  <url>
    <loc>https://24-ai.news/de/news/2026-05-06/anthropic-finance-agents-templates/</loc>
    <news:news>
      <news:publication>
        <news:name>24 AI</news:name>
        <news:language>de</news:language>
      </news:publication>
      <news:publication_date>2026-05-06T00:00:00Z</news:publication_date>
      <news:title>Anthropic: 10 fertige Finanzdienstleistungs-Agent-Templates + Claude Opus 4.7 mit 64,37 % auf dem Vals-AI-Finance-Benchmark</news:title>
    </news:news>
  </url>
  <url>
    <loc>https://24-ai.news/de/news/2026-05-06/arxiv-2605-02503-dataclaw-eda-benchmark/</loc>
    <news:news>
      <news:publication>
        <news:name>24 AI</news:name>
        <news:language>de</news:language>
      </news:publication>
      <news:publication_date>2026-05-06T00:00:00Z</news:publication_date>
      <news:title>arXiv:2605.02503: DataClaw — prozessorientierter Benchmark misst die Arbeitsqualität von KI-Agenten in der explorativen Datenanalyse</news:title>
    </news:news>
  </url>
  <url>
    <loc>https://24-ai.news/de/news/2026-05-06/arxiv-2605-03675-memtier-tiered-agent-memory/</loc>
    <news:news>
      <news:publication>
        <news:name>24 AI</news:name>
        <news:language>de</news:language>
      </news:publication>
      <news:publication_date>2026-05-06T00:00:00Z</news:publication_date>
      <news:title>arXiv:2605.03675: MEMTIER — mehrstufige Speicherarchitektur gibt Langzeit-Agenten ihr Gedächtnis zurück</news:title>
    </news:news>
  </url>
  <url>
    <loc>https://24-ai.news/de/news/2026-05-06/arxiv-2605-03871-evolm-self-improving-llms/</loc>
    <news:news>
      <news:publication>
        <news:name>24 AI</news:name>
        <news:language>de</news:language>
      </news:publication>
      <news:publication_date>2026-05-06T00:00:00Z</news:publication_date>
      <news:title>arXiv:2605.03871: EvoLM — Sprachmodelle, die sich ohne externe Überwachung selbst verbessern</news:title>
    </news:news>
  </url>
  <url>
    <loc>https://24-ai.news/de/news/2026-05-06/arxiv-2605-04039-clinical-llm-safety-scaling/</loc>
    <news:news>
      <news:publication>
        <news:name>24 AI</news:name>
        <news:language>de</news:language>
      </news:publication>
      <news:publication_date>2026-05-06T00:00:00Z</news:publication_date>
      <news:title>arXiv:2605.04039: Sicherheit und Genauigkeit klinischer KI-Modelle folgen unterschiedlichen Skalierungsgesetzen</news:title>
    </news:news>
  </url>
  <url>
    <loc>https://24-ai.news/de/news/2026-05-06/aws-agentcore-browser-os-level-actions/</loc>
    <news:news>
      <news:publication>
        <news:name>24 AI</news:name>
        <news:language>de</news:language>
      </news:publication>
      <news:publication_date>2026-05-06T00:00:00Z</news:publication_date>
      <news:title>AWS: AgentCore Browser erhält OS-Level-Aktionen — 8 neue Primitiven</news:title>
    </news:news>
  </url>
  <url>
    <loc>https://24-ai.news/de/news/2026-05-06/cncf-cloud-native-observability-survey-2026/</loc>
    <news:news>
      <news:publication>
        <news:name>24 AI</news:name>
        <news:language>de</news:language>
      </news:publication>
      <news:publication_date>2026-05-06T00:00:00Z</news:publication_date>
      <news:title>CNCF: 46,7 % der Cloud-Native-Teams betreiben noch immer 2–3 parallele Observability-Stacks</news:title>
    </news:news>
  </url>
  <url>
    <loc>https://24-ai.news/de/news/2026-05-06/github-secret-scanning-mcp-server-ga/</loc>
    <news:news>
      <news:publication>
        <news:name>24 AI</news:name>
        <news:language>de</news:language>
      </news:publication>
      <news:publication_date>2026-05-06T00:00:00Z</news:publication_date>
      <news:title>GitHub: Secret Scanning über MCP-Server erreicht GA — KI-Agenten erkennen Credentials vor dem Commit</news:title>
    </news:news>
  </url>
  <url>
    <loc>https://24-ai.news/de/news/2026-05-06/google-gemini-file-search-multimodal/</loc>
    <news:news>
      <news:publication>
        <news:name>24 AI</news:name>
        <news:language>de</news:language>
      </news:publication>
      <news:publication_date>2026-05-06T00:00:00Z</news:publication_date>
      <news:title>Google: Gemini-API-Dateisuche auf multimodale Bild- und Textsuche erweitert</news:title>
    </news:news>
  </url>
  <url>
    <loc>https://24-ai.news/de/news/2026-05-06/ibm-enterprise-advantage-context-studio/</loc>
    <news:news>
      <news:publication>
        <news:name>24 AI</news:name>
        <news:language>de</news:language>
      </news:publication>
      <news:publication_date>2026-05-06T00:00:00Z</news:publication_date>
      <news:title>IBM: Enterprise Advantage erhält Context Studio — Providence Health reduziert Manager-Einstellungszeit um 90 %</news:title>
    </news:news>
  </url>
  <url>
    <loc>https://24-ai.news/de/news/2026-05-06/microsoft-droidspeak-kv-cache-sharing/</loc>
    <news:news>
      <news:publication>
        <news:name>24 AI</news:name>
        <news:language>de</news:language>
      </news:publication>
      <news:publication_date>2026-05-06T00:00:00Z</news:publication_date>
      <news:title>Microsoft Research: DroidSpeak teilt KV-Cache zwischen feinabgestimmten LLM-Varianten für 4× höheren Durchsatz</news:title>
    </news:news>
  </url>
  <url>
    <loc>https://24-ai.news/de/news/2026-05-06/openai-gpt-55-instant-default-model/</loc>
    <news:news>
      <news:publication>
        <news:name>24 AI</news:name>
        <news:language>de</news:language>
      </news:publication>
      <news:publication_date>2026-05-06T00:00:00Z</news:publication_date>
      <news:title>OpenAI: GPT-5.5 Instant wird neues Standard-ChatGPT-Modell mit weniger Halluzinationen</news:title>
    </news:news>
  </url>
  <url>
    <loc>https://24-ai.news/de/news/2026-05-06/uk-aisi-microsoft-frontier-ai-partnership/</loc>
    <news:news>
      <news:publication>
        <news:name>24 AI</news:name>
        <news:language>de</news:language>
      </news:publication>
      <news:publication_date>2026-05-06T00:00:00Z</news:publication_date>
      <news:title>UK AISI: neues MoU mit Microsoft für Frontier-KI-Sicherheit in 3 Bereichen</news:title>
    </news:news>
  </url>
  <url>
    <loc>https://24-ai.news/zh/news/2026-05-06/ai2-molmoact-2-open-robotics-foundation/</loc>
    <news:news>
      <news:publication>
        <news:name>24 AI</news:name>
        <news:language>zh-cn</news:language>
      </news:publication>
      <news:publication_date>2026-05-06T00:00:00Z</news:publication_date>
      <news:title>Allen Institute: MolmoAct 2 是首个开源机器人基础模型，超越 GPT-5 和 Gemini 2.5 Pro</news:title>
    </news:news>
  </url>
  <url>
    <loc>https://24-ai.news/zh/news/2026-05-06/amd-farskip-collective-moe-inference/</loc>
    <news:news>
      <news:publication>
        <news:name>24 AI</news:name>
        <news:language>zh-cn</news:language>
      </news:publication>
      <news:publication_date>2026-05-06T00:00:00Z</news:publication_date>
      <news:title>AMD: FarSkip-Collective 将 AMD GPU 上的 MoE 推理速度提升 18–34%</news:title>
    </news:news>
  </url>
  <url>
    <loc>https://24-ai.news/zh/news/2026-05-06/anthropic-claude-code-v2-1-131-hotfix/</loc>
    <news:news>
      <news:publication>
        <news:name>24 AI</news:name>
        <news:language>zh-cn</news:language>
      </news:publication>
      <news:publication_date>2026-05-06T00:00:00Z</news:publication_date>
      <news:title>Anthropic: Claude Code v2.1.131 — Windows VS Code激活和Mantle x-api-key热修复</news:title>
    </news:news>
  </url>
  <url>
    <loc>https://24-ai.news/zh/news/2026-05-06/anthropic-finance-agents-templates/</loc>
    <news:news>
      <news:publication>
        <news:name>24 AI</news:name>
        <news:language>zh-cn</news:language>
      </news:publication>
      <news:publication_date>2026-05-06T00:00:00Z</news:publication_date>
      <news:title>Anthropic: 10 款现成金融服务代理模板 + Claude Opus 4.7 在 Vals AI Finance 基准测试中达到 64.37%</news:title>
    </news:news>
  </url>
  <url>
    <loc>https://24-ai.news/zh/news/2026-05-06/arxiv-2605-02503-dataclaw-eda-benchmark/</loc>
    <news:news>
      <news:publication>
        <news:name>24 AI</news:name>
        <news:language>zh-cn</news:language>
      </news:publication>
      <news:publication_date>2026-05-06T00:00:00Z</news:publication_date>
      <news:title>arXiv:2605.02503: DataClaw——面向过程的基准测试衡量 AI 代理在探索性数据分析中的过程质量</news:title>
    </news:news>
  </url>
  <url>
    <loc>https://24-ai.news/zh/news/2026-05-06/arxiv-2605-03675-memtier-tiered-agent-memory/</loc>
    <news:news>
      <news:publication>
        <news:name>24 AI</news:name>
        <news:language>zh-cn</news:language>
      </news:publication>
      <news:publication_date>2026-05-06T00:00:00Z</news:publication_date>
      <news:title>arXiv:2605.03675: MEMTIER — 五层记忆架构让长期智能体恢复记忆能力</news:title>
    </news:news>
  </url>
  <url>
    <loc>https://24-ai.news/zh/news/2026-05-06/arxiv-2605-03871-evolm-self-improving-llms/</loc>
    <news:news>
      <news:publication>
        <news:name>24 AI</news:name>
        <news:language>zh-cn</news:language>
      </news:publication>
      <news:publication_date>2026-05-06T00:00:00Z</news:publication_date>
      <news:title>arXiv:2605.03871: EvoLM — 无需外部监督即可自我提升的语言模型</news:title>
    </news:news>
  </url>
  <url>
    <loc>https://24-ai.news/zh/news/2026-05-06/arxiv-2605-04039-clinical-llm-safety-scaling/</loc>
    <news:news>
      <news:publication>
        <news:name>24 AI</news:name>
        <news:language>zh-cn</news:language>
      </news:publication>
      <news:publication_date>2026-05-06T00:00:00Z</news:publication_date>
      <news:title>arXiv:2605.04039: 临床LLM的安全性与准确性遵循不同的缩放定律</news:title>
    </news:news>
  </url>
  <url>
    <loc>https://24-ai.news/zh/news/2026-05-06/aws-agentcore-browser-os-level-actions/</loc>
    <news:news>
      <news:publication>
        <news:name>24 AI</news:name>
        <news:language>zh-cn</news:language>
      </news:publication>
      <news:publication_date>2026-05-06T00:00:00Z</news:publication_date>
      <news:title>AWS: AgentCore Browser 新增 OS 级操作——8 个新原语</news:title>
    </news:news>
  </url>
  <url>
    <loc>https://24-ai.news/zh/news/2026-05-06/cncf-cloud-native-observability-survey-2026/</loc>
    <news:news>
      <news:publication>
        <news:name>24 AI</news:name>
        <news:language>zh-cn</news:language>
      </news:publication>
      <news:publication_date>2026-05-06T00:00:00Z</news:publication_date>
      <news:title>CNCF: 46.7%的云原生团队仍在运行2-3个并行可观测性技术栈</news:title>
    </news:news>
  </url>
  <url>
    <loc>https://24-ai.news/zh/news/2026-05-06/github-secret-scanning-mcp-server-ga/</loc>
    <news:news>
      <news:publication>
        <news:name>24 AI</news:name>
        <news:language>zh-cn</news:language>
      </news:publication>
      <news:publication_date>2026-05-06T00:00:00Z</news:publication_date>
      <news:title>GitHub: 通过 MCP 服务器的密钥扫描正式发布——AI 代理在提交前检测凭据</news:title>
    </news:news>
  </url>
  <url>
    <loc>https://24-ai.news/zh/news/2026-05-06/google-gemini-file-search-multimodal/</loc>
    <news:news>
      <news:publication>
        <news:name>24 AI</news:name>
        <news:language>zh-cn</news:language>
      </news:publication>
      <news:publication_date>2026-05-06T00:00:00Z</news:publication_date>
      <news:title>Google: Gemini API File Search 扩展至图像和文本的多模态搜索</news:title>
    </news:news>
  </url>
  <url>
    <loc>https://24-ai.news/zh/news/2026-05-06/ibm-enterprise-advantage-context-studio/</loc>
    <news:news>
      <news:publication>
        <news:name>24 AI</news:name>
        <news:language>zh-cn</news:language>
      </news:publication>
      <news:publication_date>2026-05-06T00:00:00Z</news:publication_date>
      <news:title>IBM: Enterprise Advantage新增Context Studio——Providence Health将招聘管理时间缩短90%</news:title>
    </news:news>
  </url>
  <url>
    <loc>https://24-ai.news/zh/news/2026-05-06/microsoft-droidspeak-kv-cache-sharing/</loc>
    <news:news>
      <news:publication>
        <news:name>24 AI</news:name>
        <news:language>zh-cn</news:language>
      </news:publication>
      <news:publication_date>2026-05-06T00:00:00Z</news:publication_date>
      <news:title>Microsoft Research: DroidSpeak 在微调 LLM 变体间共享 KV 缓存，实现 4× 更高吞吐量</news:title>
    </news:news>
  </url>
  <url>
    <loc>https://24-ai.news/zh/news/2026-05-06/openai-gpt-55-instant-default-model/</loc>
    <news:news>
      <news:publication>
        <news:name>24 AI</news:name>
        <news:language>zh-cn</news:language>
      </news:publication>
      <news:publication_date>2026-05-06T00:00:00Z</news:publication_date>
      <news:title>OpenAI: GPT-5.5 Instant 成为 ChatGPT 新默认模型，减少幻觉</news:title>
    </news:news>
  </url>
  <url>
    <loc>https://24-ai.news/zh/news/2026-05-06/uk-aisi-microsoft-frontier-ai-partnership/</loc>
    <news:news>
      <news:publication>
        <news:name>24 AI</news:name>
        <news:language>zh-cn</news:language>
      </news:publication>
      <news:publication_date>2026-05-06T00:00:00Z</news:publication_date>
      <news:title>UK AISI: 与 Microsoft 签署前沿 AI 安全 3 个领域合作备忘录</news:title>
    </news:news>
  </url>
  <url>
    <loc>https://24-ai.news/ja/news/2026-05-06/ai2-molmoact-2-open-robotics-foundation/</loc>
    <news:news>
      <news:publication>
        <news:name>24 AI</news:name>
        <news:language>ja</news:language>
      </news:publication>
      <news:publication_date>2026-05-06T00:00:00Z</news:publication_date>
      <news:title>Allen Institute: MolmoAct 2 はGPT-5・Gemini 2.5 Proを上回る初のオープンソースロボティクス基盤モデル</news:title>
    </news:news>
  </url>
  <url>
    <loc>https://24-ai.news/ja/news/2026-05-06/amd-farskip-collective-moe-inference/</loc>
    <news:news>
      <news:publication>
        <news:name>24 AI</news:name>
        <news:language>ja</news:language>
      </news:publication>
      <news:publication_date>2026-05-06T00:00:00Z</news:publication_date>
      <news:title>AMD: FarSkip-Collective が AMD GPU 上の MoE 推論を 18〜34% 高速化</news:title>
    </news:news>
  </url>
  <url>
    <loc>https://24-ai.news/ja/news/2026-05-06/anthropic-claude-code-v2-1-131-hotfix/</loc>
    <news:news>
      <news:publication>
        <news:name>24 AI</news:name>
        <news:language>ja</news:language>
      </news:publication>
      <news:publication_date>2026-05-06T00:00:00Z</news:publication_date>
      <news:title>Anthropic: Claude Code v2.1.131 — Windows VS Code有効化とMantle x-api-keyホットフィックス</news:title>
    </news:news>
  </url>
  <url>
    <loc>https://24-ai.news/ja/news/2026-05-06/anthropic-finance-agents-templates/</loc>
    <news:news>
      <news:publication>
        <news:name>24 AI</news:name>
        <news:language>ja</news:language>
      </news:publication>
      <news:publication_date>2026-05-06T00:00:00Z</news:publication_date>
      <news:title>Anthropic: 10 種類の金融サービス向けエージェントテンプレートを提供、Claude Opus 4.7 が Vals AI Finance ベンチマークで 64.37% を達成</news:title>
    </news:news>
  </url>
  <url>
    <loc>https://24-ai.news/ja/news/2026-05-06/arxiv-2605-02503-dataclaw-eda-benchmark/</loc>
    <news:news>
      <news:publication>
        <news:name>24 AI</news:name>
        <news:language>ja</news:language>
      </news:publication>
      <news:publication_date>2026-05-06T00:00:00Z</news:publication_date>
      <news:title>arXiv:2605.02503: DataClaw——探索的データ分析における AI エージェントのプロセス品質を測る過程指向ベンチマーク</news:title>
    </news:news>
  </url>
  <url>
    <loc>https://24-ai.news/ja/news/2026-05-06/arxiv-2605-03675-memtier-tiered-agent-memory/</loc>
    <news:news>
      <news:publication>
        <news:name>24 AI</news:name>
        <news:language>ja</news:language>
      </news:publication>
      <news:publication_date>2026-05-06T00:00:00Z</news:publication_date>
      <news:title>arXiv:2605.03675: MEMTIER — 五層メモリアーキテクチャが長期エージェントに記憶力を取り戻す</news:title>
    </news:news>
  </url>
  <url>
    <loc>https://24-ai.news/ja/news/2026-05-06/arxiv-2605-03871-evolm-self-improving-llms/</loc>
    <news:news>
      <news:publication>
        <news:name>24 AI</news:name>
        <news:language>ja</news:language>
      </news:publication>
      <news:publication_date>2026-05-06T00:00:00Z</news:publication_date>
      <news:title>arXiv:2605.03871: EvoLM — 外部監督なしで自己改善する言語モデル</news:title>
    </news:news>
  </url>
  <url>
    <loc>https://24-ai.news/ja/news/2026-05-06/arxiv-2605-04039-clinical-llm-safety-scaling/</loc>
    <news:news>
      <news:publication>
        <news:name>24 AI</news:name>
        <news:language>ja</news:language>
      </news:publication>
      <news:publication_date>2026-05-06T00:00:00Z</news:publication_date>
      <news:title>arXiv:2605.04039: 臨床LLMの安全性と精度は異なるスケーリング則に従う</news:title>
    </news:news>
  </url>
  <url>
    <loc>https://24-ai.news/ja/news/2026-05-06/aws-agentcore-browser-os-level-actions/</loc>
    <news:news>
      <news:publication>
        <news:name>24 AI</news:name>
        <news:language>ja</news:language>
      </news:publication>
      <news:publication_date>2026-05-06T00:00:00Z</news:publication_date>
      <news:title>AWS: AgentCore Browser が OS レベルのアクションを取得——8 つの新しいプリミティブ</news:title>
    </news:news>
  </url>
  <url>
    <loc>https://24-ai.news/ja/news/2026-05-06/cncf-cloud-native-observability-survey-2026/</loc>
    <news:news>
      <news:publication>
        <news:name>24 AI</news:name>
        <news:language>ja</news:language>
      </news:publication>
      <news:publication_date>2026-05-06T00:00:00Z</news:publication_date>
      <news:title>CNCF: 46.7%のクラウドネイティブチームが2〜3つの並行オブザーバビリティスタックを運用中</news:title>
    </news:news>
  </url>
  <url>
    <loc>https://24-ai.news/ja/news/2026-05-06/github-secret-scanning-mcp-server-ga/</loc>
    <news:news>
      <news:publication>
        <news:name>24 AI</news:name>
        <news:language>ja</news:language>
      </news:publication>
      <news:publication_date>2026-05-06T00:00:00Z</news:publication_date>
      <news:title>GitHub: MCP サーバーを通じたシークレットスキャンが GA リリース——AI エージェントがコミット前に認証情報を検出</news:title>
    </news:news>
  </url>
  <url>
    <loc>https://24-ai.news/ja/news/2026-05-06/google-gemini-file-search-multimodal/</loc>
    <news:news>
      <news:publication>
        <news:name>24 AI</news:name>
        <news:language>ja</news:language>
      </news:publication>
      <news:publication_date>2026-05-06T00:00:00Z</news:publication_date>
      <news:title>Google: Gemini API File Search が画像とテキストのマルチモーダル検索に対応</news:title>
    </news:news>
  </url>
  <url>
    <loc>https://24-ai.news/ja/news/2026-05-06/ibm-enterprise-advantage-context-studio/</loc>
    <news:news>
      <news:publication>
        <news:name>24 AI</news:name>
        <news:language>ja</news:language>
      </news:publication>
      <news:publication_date>2026-05-06T00:00:00Z</news:publication_date>
      <news:title>IBM: Enterprise AdvantageにContext Studioを追加——Providence Healthの採用管理時間を90%削減</news:title>
    </news:news>
  </url>
  <url>
    <loc>https://24-ai.news/ja/news/2026-05-06/microsoft-droidspeak-kv-cache-sharing/</loc>
    <news:news>
      <news:publication>
        <news:name>24 AI</news:name>
        <news:language>ja</news:language>
      </news:publication>
      <news:publication_date>2026-05-06T00:00:00Z</news:publication_date>
      <news:title>Microsoft Research: DroidSpeak がファインチューニング済み LLM バリアント間で KV キャッシュを共有し、4× の高いスループットを実現</news:title>
    </news:news>
  </url>
  <url>
    <loc>https://24-ai.news/ja/news/2026-05-06/openai-gpt-55-instant-default-model/</loc>
    <news:news>
      <news:publication>
        <news:name>24 AI</news:name>
        <news:language>ja</news:language>
      </news:publication>
      <news:publication_date>2026-05-06T00:00:00Z</news:publication_date>
      <news:title>OpenAI: GPT-5.5 Instant が ChatGPT の新しいデフォルトモデルに、幻覚を削減</news:title>
    </news:news>
  </url>
  <url>
    <loc>https://24-ai.news/ja/news/2026-05-06/uk-aisi-microsoft-frontier-ai-partnership/</loc>
    <news:news>
      <news:publication>
        <news:name>24 AI</news:name>
        <news:language>ja</news:language>
      </news:publication>
      <news:publication_date>2026-05-06T00:00:00Z</news:publication_date>
      <news:title>UK AISI: Microsoft とフロンティア AI 安全における 3 分野の MoU を締結</news:title>
    </news:news>
  </url>
  <url>
    <loc>https://24-ai.news/ko/news/2026-05-06/ai2-molmoact-2-open-robotics-foundation/</loc>
    <news:news>
      <news:publication>
        <news:name>24 AI</news:name>
        <news:language>ko</news:language>
      </news:publication>
      <news:publication_date>2026-05-06T00:00:00Z</news:publication_date>
      <news:title>Allen Institute: MolmoAct 2, GPT-5와 Gemini 2.5 Pro를 능가하는 최초의 오픈소스 로보틱스 파운데이션 모델</news:title>
    </news:news>
  </url>
  <url>
    <loc>https://24-ai.news/ko/news/2026-05-06/amd-farskip-collective-moe-inference/</loc>
    <news:news>
      <news:publication>
        <news:name>24 AI</news:name>
        <news:language>ko</news:language>
      </news:publication>
      <news:publication_date>2026-05-06T00:00:00Z</news:publication_date>
      <news:title>AMD: FarSkip-Collective, AMD GPU에서 MoE 추론 속도 18~34% 향상</news:title>
    </news:news>
  </url>
  <url>
    <loc>https://24-ai.news/ko/news/2026-05-06/anthropic-claude-code-v2-1-131-hotfix/</loc>
    <news:news>
      <news:publication>
        <news:name>24 AI</news:name>
        <news:language>ko</news:language>
      </news:publication>
      <news:publication_date>2026-05-06T00:00:00Z</news:publication_date>
      <news:title>Anthropic: Claude Code v2.1.131 — Windows VS Code 활성화 및 Mantle x-api-key 핫픽스</news:title>
    </news:news>
  </url>
  <url>
    <loc>https://24-ai.news/ko/news/2026-05-06/anthropic-finance-agents-templates/</loc>
    <news:news>
      <news:publication>
        <news:name>24 AI</news:name>
        <news:language>ko</news:language>
      </news:publication>
      <news:publication_date>2026-05-06T00:00:00Z</news:publication_date>
      <news:title>Anthropic: 금융 서비스용 에이전트 템플릿 10종 + Claude Opus 4.7, Vals AI Finance 벤치마크에서 64.37% 달성</news:title>
    </news:news>
  </url>
  <url>
    <loc>https://24-ai.news/ko/news/2026-05-06/arxiv-2605-02503-dataclaw-eda-benchmark/</loc>
    <news:news>
      <news:publication>
        <news:name>24 AI</news:name>
        <news:language>ko</news:language>
      </news:publication>
      <news:publication_date>2026-05-06T00:00:00Z</news:publication_date>
      <news:title>arXiv:2605.02503: DataClaw——탐색적 데이터 분석에서 AI 에이전트의 프로세스 품질을 측정하는 과정 중심 벤치마크</news:title>
    </news:news>
  </url>
  <url>
    <loc>https://24-ai.news/ko/news/2026-05-06/arxiv-2605-03675-memtier-tiered-agent-memory/</loc>
    <news:news>
      <news:publication>
        <news:name>24 AI</news:name>
        <news:language>ko</news:language>
      </news:publication>
      <news:publication_date>2026-05-06T00:00:00Z</news:publication_date>
      <news:title>arXiv:2605.03675: MEMTIER — 5계층 메모리 아키텍처로 장기 에이전트에 기억력 회복</news:title>
    </news:news>
  </url>
  <url>
    <loc>https://24-ai.news/ko/news/2026-05-06/arxiv-2605-03871-evolm-self-improving-llms/</loc>
    <news:news>
      <news:publication>
        <news:name>24 AI</news:name>
        <news:language>ko</news:language>
      </news:publication>
      <news:publication_date>2026-05-06T00:00:00Z</news:publication_date>
      <news:title>arXiv:2605.03871: EvoLM — 외부 감독 없이 자가 개선하는 언어 모델</news:title>
    </news:news>
  </url>
  <url>
    <loc>https://24-ai.news/ko/news/2026-05-06/arxiv-2605-04039-clinical-llm-safety-scaling/</loc>
    <news:news>
      <news:publication>
        <news:name>24 AI</news:name>
        <news:language>ko</news:language>
      </news:publication>
      <news:publication_date>2026-05-06T00:00:00Z</news:publication_date>
      <news:title>arXiv:2605.04039: 임상 LLM의 안전성과 정확도는 서로 다른 스케일링 법칙을 따른다</news:title>
    </news:news>
  </url>
  <url>
    <loc>https://24-ai.news/ko/news/2026-05-06/aws-agentcore-browser-os-level-actions/</loc>
    <news:news>
      <news:publication>
        <news:name>24 AI</news:name>
        <news:language>ko</news:language>
      </news:publication>
      <news:publication_date>2026-05-06T00:00:00Z</news:publication_date>
      <news:title>AWS: AgentCore Browser에 OS 수준 작업 추가——8개의 새로운 기본 기능</news:title>
    </news:news>
  </url>
  <url>
    <loc>https://24-ai.news/ko/news/2026-05-06/cncf-cloud-native-observability-survey-2026/</loc>
    <news:news>
      <news:publication>
        <news:name>24 AI</news:name>
        <news:language>ko</news:language>
      </news:publication>
      <news:publication_date>2026-05-06T00:00:00Z</news:publication_date>
      <news:title>CNCF: 46.7%의 클라우드 네이티브 팀이 여전히 2-3개의 병렬 관찰 가능성 스택을 운영 중</news:title>
    </news:news>
  </url>
  <url>
    <loc>https://24-ai.news/ko/news/2026-05-06/github-secret-scanning-mcp-server-ga/</loc>
    <news:news>
      <news:publication>
        <news:name>24 AI</news:name>
        <news:language>ko</news:language>
      </news:publication>
      <news:publication_date>2026-05-06T00:00:00Z</news:publication_date>
      <news:title>GitHub: MCP 서버를 통한 시크릿 스캔 GA 출시 — AI 에이전트가 커밋 전 자격 증명 탐지</news:title>
    </news:news>
  </url>
  <url>
    <loc>https://24-ai.news/ko/news/2026-05-06/google-gemini-file-search-multimodal/</loc>
    <news:news>
      <news:publication>
        <news:name>24 AI</news:name>
        <news:language>ko</news:language>
      </news:publication>
      <news:publication_date>2026-05-06T00:00:00Z</news:publication_date>
      <news:title>Google: Gemini API File Search, 이미지와 텍스트의 멀티모달 검색으로 확장</news:title>
    </news:news>
  </url>
  <url>
    <loc>https://24-ai.news/ko/news/2026-05-06/ibm-enterprise-advantage-context-studio/</loc>
    <news:news>
      <news:publication>
        <news:name>24 AI</news:name>
        <news:language>ko</news:language>
      </news:publication>
      <news:publication_date>2026-05-06T00:00:00Z</news:publication_date>
      <news:title>IBM: Enterprise Advantage에 Context Studio 추가 — Providence Health 채용 관리 시간 90% 단축</news:title>
    </news:news>
  </url>
  <url>
    <loc>https://24-ai.news/ko/news/2026-05-06/microsoft-droidspeak-kv-cache-sharing/</loc>
    <news:news>
      <news:publication>
        <news:name>24 AI</news:name>
        <news:language>ko</news:language>
      </news:publication>
      <news:publication_date>2026-05-06T00:00:00Z</news:publication_date>
      <news:title>Microsoft Research: DroidSpeak, 미세 조정된 LLM 변형 간 KV 캐시 공유로 4배 더 높은 처리량 달성</news:title>
    </news:news>
  </url>
  <url>
    <loc>https://24-ai.news/ko/news/2026-05-06/openai-gpt-55-instant-default-model/</loc>
    <news:news>
      <news:publication>
        <news:name>24 AI</news:name>
        <news:language>ko</news:language>
      </news:publication>
      <news:publication_date>2026-05-06T00:00:00Z</news:publication_date>
      <news:title>OpenAI: GPT-5.5 Instant, 환각 감소와 함께 ChatGPT의 새 기본 모델로 지정</news:title>
    </news:news>
  </url>
  <url>
    <loc>https://24-ai.news/ko/news/2026-05-06/uk-aisi-microsoft-frontier-ai-partnership/</loc>
    <news:news>
      <news:publication>
        <news:name>24 AI</news:name>
        <news:language>ko</news:language>
      </news:publication>
      <news:publication_date>2026-05-06T00:00:00Z</news:publication_date>
      <news:title>UK AISI: Microsoft와 3개 분야 프론티어 AI 안전 협력을 위한 MoU 체결</news:title>
    </news:news>
  </url>
  <url>
    <loc>https://24-ai.news/hr/news/2026-05-05/anthropic-enterprise-services-blackstone-goldman/</loc>
    <news:news>
      <news:publication>
        <news:name>24 AI</news:name>
        <news:language>hr</news:language>
      </news:publication>
      <news:publication_date>2026-05-05T00:00:00Z</news:publication_date>
      <news:title>Anthropic s Blackstoneom, Hellman &amp; Friedmanom i Goldman Sachsom osniva enterprise AI uslužnu tvrtku za mid-market</news:title>
    </news:news>
  </url>
  <url>
    <loc>https://24-ai.news/hr/news/2026-05-05/arxiv-agentfloor-small-models-tools/</loc>
    <news:news>
      <news:publication>
        <news:name>24 AI</news:name>
        <news:language>hr</news:language>
      </news:publication>
      <news:publication_date>2026-05-05T00:00:00Z</news:publication_date>
      <news:title>ArXiv AgentFloor: mali open-weight modeli (0,27B-32B) zadovoljavaju kratkoročne agentne zadatke, GPT-5 zadržava prednost samo u dugoročnom planiranju</news:title>
    </news:news>
  </url>
  <url>
    <loc>https://24-ai.news/hr/news/2026-05-05/arxiv-gui-sd-on-policy-self-distillation/</loc>
    <news:news>
      <news:publication>
        <news:name>24 AI</news:name>
        <news:language>hr</news:language>
      </news:publication>
      <news:publication_date>2026-05-05T00:00:00Z</news:publication_date>
      <news:title>ArXiv GUI-SD: prvi on-policy self-distillation framework za GUI grounding nadmašuje GRPO na šest benchmarkova u točnosti i efikasnosti treniranja</news:title>
    </news:news>
  </url>
  <url>
    <loc>https://24-ai.news/hr/news/2026-05-05/arxiv-long-horizon-llm-training-instability/</loc>
    <news:news>
      <news:publication>
        <news:name>24 AI</news:name>
        <news:language>hr</news:language>
      </news:publication>
      <news:publication_date>2026-05-05T00:00:00Z</news:publication_date>
      <news:title>arXiv:2605.02572: Dugi horizonti destabiliziraju LLM trening — ICML 2026 papir nudi &apos;horizon generalization&apos; kao rješenje</news:title>
    </news:news>
  </url>
  <url>
    <loc>https://24-ai.news/hr/news/2026-05-05/arxiv-reclaim-medical-claims-foundation-model/</loc>
    <news:news>
      <news:publication>
        <news:name>24 AI</news:name>
        <news:language>hr</news:language>
      </news:publication>
      <news:publication_date>2026-05-05T00:00:00Z</news:publication_date>
      <news:title>arXiv:2605.02740: ReClaim — foundation model na 200 milijuna pacijentskih zapisa postiže mean AUC 75,6 % na 1000+ medicinskih zadataka</news:title>
    </news:news>
  </url>
  <url>
    <loc>https://24-ai.news/hr/news/2026-05-05/arxiv-saga-gpu-scheduling-agents/</loc>
    <news:news>
      <news:publication>
        <news:name>24 AI</news:name>
        <news:language>hr</news:language>
      </news:publication>
      <news:publication_date>2026-05-05T00:00:00Z</news:publication_date>
      <news:title>ArXiv SAGA: workflow-atomic GPU scheduling za AI agente postiže 1,64× brže task completion na 64-GPU klasteru, prihvaćeno na HPDC 2026</news:title>
    </news:news>
  </url>
  <url>
    <loc>https://24-ai.news/hr/news/2026-05-05/arxiv-token-arena-energy-benchmark/</loc>
    <news:news>
      <news:publication>
        <news:name>24 AI</news:name>
        <news:language>hr</news:language>
      </news:publication>
      <news:publication_date>2026-05-05T00:00:00Z</news:publication_date>
      <news:title>ArXiv Token Arena: kontinuirani benchmark koji ujedinjuje energiju i kogniciju, otkriva 6,2× razliku u jouleima po točnom odgovoru između endpointa</news:title>
    </news:news>
  </url>
  <url>
    <loc>https://24-ai.news/hr/news/2026-05-05/arxiv-vlm-visual-jailbreak-icml-2026/</loc>
    <news:news>
      <news:publication>
        <news:name>24 AI</news:name>
        <news:language>hr</news:language>
      </news:publication>
      <news:publication_date>2026-05-05T00:00:00Z</news:publication_date>
      <news:title>ArXiv: Vizualne slike zaobilaze sigurnosne filtre vision-language modela u 40,9 % slučajeva, otkrivaju autori na ICML 2026</news:title>
    </news:news>
  </url>
  <url>
    <loc>https://24-ai.news/hr/news/2026-05-05/aws-agentcore-optimization-preview/</loc>
    <news:news>
      <news:publication>
        <news:name>24 AI</news:name>
        <news:language>hr</news:language>
      </news:publication>
      <news:publication_date>2026-05-05T00:00:00Z</news:publication_date>
      <news:title>AWS Bedrock AgentCore Optimization u previewu: automatizirana petlja od produkcijskih traga do A/B testa s OpenTelemetry trace-ovima</news:title>
    </news:news>
  </url>
  <url>
    <loc>https://24-ai.news/hr/news/2026-05-05/aws-sagemaker-agentic-fine-tuning-workflows/</loc>
    <news:news>
      <news:publication>
        <news:name>24 AI</news:name>
        <news:language>hr</news:language>
      </news:publication>
      <news:publication_date>2026-05-05T00:00:00Z</news:publication_date>
      <news:title>AWS SageMaker AI dobiva agentne workflowe za fine-tuning s 9 ugrađenih vještina i integracijom Kiroa i Claude Codea</news:title>
    </news:news>
  </url>
  <url>
    <loc>https://24-ai.news/hr/news/2026-05-05/claude-code-v2-1-128-mcp-zip-fixes/</loc>
    <news:news>
      <news:publication>
        <news:name>24 AI</news:name>
        <news:language>hr</news:language>
      </news:publication>
      <news:publication_date>2026-05-05T00:00:00Z</news:publication_date>
      <news:title>Anthropic Claude Code v2.1.128: 30+ ispravaka, .zip plugin podrška i ~3× manji cache_creation trošak za sub-agente</news:title>
    </news:news>
  </url>
  <url>
    <loc>https://24-ai.news/hr/news/2026-05-05/cncf-github-actions-ci-security/</loc>
    <news:news>
      <news:publication>
        <news:name>24 AI</news:name>
        <news:language>hr</news:language>
      </news:publication>
      <news:publication_date>2026-05-05T00:00:00Z</news:publication_date>
      <news:title>CNCF: pinning na immutable digest, least-privilege tokeni i ephemeral runneri — recipe card za sigurniji GitHub Actions pipeline</news:title>
    </news:news>
  </url>
  <url>
    <loc>https://24-ai.news/hr/news/2026-05-05/ibm-think-2026-ai-operating-model/</loc>
    <news:news>
      <news:publication>
        <news:name>24 AI</news:name>
        <news:language>hr</news:language>
      </news:publication>
      <news:publication_date>2026-05-05T00:00:00Z</news:publication_date>
      <news:title>IBM Think 2026: Krishna predstavio AI Operating Model temeljen na 4 stupa s watsonx Orchestrate, IBM Bobom i Sovereign Coreom</news:title>
    </news:news>
  </url>
  <url>
    <loc>https://24-ai.news/hr/news/2026-05-05/nist-caisi-deepseek-v4-pro-evaluation/</loc>
    <news:news>
      <news:publication>
        <news:name>24 AI</news:name>
        <news:language>hr</news:language>
      </news:publication>
      <news:publication_date>2026-05-05T00:00:00Z</news:publication_date>
      <news:title>NIST CAISI: DeepSeek V4 Pro je najsposobniji kineski AI model do sada, ali zaostaje 8 mjeseci za američkim frontierom</news:title>
    </news:news>
  </url>
  <url>
    <loc>https://24-ai.news/hr/news/2026-05-05/nist-caisi-frontier-ai-testing-deepmind-msft-xai/</loc>
    <news:news>
      <news:publication>
        <news:name>24 AI</news:name>
        <news:language>hr</news:language>
      </news:publication>
      <news:publication_date>2026-05-05T00:00:00Z</news:publication_date>
      <news:title>NIST CAISI proširio frontier AI national security testiranje na Google DeepMind, Microsoft i xAI</news:title>
    </news:news>
  </url>
  <url>
    <loc>https://24-ai.news/en/news/2026-05-05/anthropic-enterprise-services-blackstone-goldman/</loc>
    <news:news>
      <news:publication>
        <news:name>24 AI</news:name>
        <news:language>en</news:language>
      </news:publication>
      <news:publication_date>2026-05-05T00:00:00Z</news:publication_date>
      <news:title>Anthropic launches enterprise AI services company with Blackstone, Hellman &amp; Friedman, and Goldman Sachs for mid-market</news:title>
    </news:news>
  </url>
  <url>
    <loc>https://24-ai.news/en/news/2026-05-05/arxiv-agentfloor-small-models-tools/</loc>
    <news:news>
      <news:publication>
        <news:name>24 AI</news:name>
        <news:language>en</news:language>
      </news:publication>
      <news:publication_date>2026-05-05T00:00:00Z</news:publication_date>
      <news:title>ArXiv AgentFloor: small open-weight models (0.27B–32B) are sufficient for short-horizon agent tasks; GPT-5 retains advantage only in long-horizon planning</news:title>
    </news:news>
  </url>
  <url>
    <loc>https://24-ai.news/en/news/2026-05-05/arxiv-gui-sd-on-policy-self-distillation/</loc>
    <news:news>
      <news:publication>
        <news:name>24 AI</news:name>
        <news:language>en</news:language>
      </news:publication>
      <news:publication_date>2026-05-05T00:00:00Z</news:publication_date>
      <news:title>ArXiv GUI-SD: first on-policy self-distillation framework for GUI grounding outperforms GRPO across six benchmarks in accuracy and training efficiency</news:title>
    </news:news>
  </url>
  <url>
    <loc>https://24-ai.news/en/news/2026-05-05/arxiv-long-horizon-llm-training-instability/</loc>
    <news:news>
      <news:publication>
        <news:name>24 AI</news:name>
        <news:language>en</news:language>
      </news:publication>
      <news:publication_date>2026-05-05T00:00:00Z</news:publication_date>
      <news:title>arXiv:2605.02572: Long Horizons Destabilize LLM Training — ICML 2026 Paper Offers &apos;Horizon Generalization&apos; as a Solution</news:title>
    </news:news>
  </url>
  <url>
    <loc>https://24-ai.news/en/news/2026-05-05/arxiv-reclaim-medical-claims-foundation-model/</loc>
    <news:news>
      <news:publication>
        <news:name>24 AI</news:name>
        <news:language>en</news:language>
      </news:publication>
      <news:publication_date>2026-05-05T00:00:00Z</news:publication_date>
      <news:title>arXiv:2605.02740: ReClaim — Foundation Model Trained on 200 Million Patient Records Achieves Mean AUC 75.6% on 1,000+ Medical Tasks</news:title>
    </news:news>
  </url>
  <url>
    <loc>https://24-ai.news/en/news/2026-05-05/arxiv-saga-gpu-scheduling-agents/</loc>
    <news:news>
      <news:publication>
        <news:name>24 AI</news:name>
        <news:language>en</news:language>
      </news:publication>
      <news:publication_date>2026-05-05T00:00:00Z</news:publication_date>
      <news:title>ArXiv SAGA: workflow-atomic GPU scheduling for AI agents achieves 1.64× faster task completion on a 64-GPU cluster, accepted at HPDC 2026</news:title>
    </news:news>
  </url>
  <url>
    <loc>https://24-ai.news/en/news/2026-05-05/arxiv-token-arena-energy-benchmark/</loc>
    <news:news>
      <news:publication>
        <news:name>24 AI</news:name>
        <news:language>en</news:language>
      </news:publication>
      <news:publication_date>2026-05-05T00:00:00Z</news:publication_date>
      <news:title>ArXiv Token Arena: continuous benchmark unifying energy and cognition reveals 6.2× difference in joules per correct answer across endpoints</news:title>
    </news:news>
  </url>
  <url>
    <loc>https://24-ai.news/en/news/2026-05-05/arxiv-vlm-visual-jailbreak-icml-2026/</loc>
    <news:news>
      <news:publication>
        <news:name>24 AI</news:name>
        <news:language>en</news:language>
      </news:publication>
      <news:publication_date>2026-05-05T00:00:00Z</news:publication_date>
      <news:title>ArXiv: Visual inputs bypass safety filters in vision-language models 40.9% of the time, ICML 2026 authors find</news:title>
    </news:news>
  </url>
  <url>
    <loc>https://24-ai.news/en/news/2026-05-05/aws-agentcore-optimization-preview/</loc>
    <news:news>
      <news:publication>
        <news:name>24 AI</news:name>
        <news:language>en</news:language>
      </news:publication>
      <news:publication_date>2026-05-05T00:00:00Z</news:publication_date>
      <news:title>AWS Bedrock AgentCore Optimization in preview: automated loop from production traces to A/B tests via OpenTelemetry</news:title>
    </news:news>
  </url>
  <url>
    <loc>https://24-ai.news/en/news/2026-05-05/aws-sagemaker-agentic-fine-tuning-workflows/</loc>
    <news:news>
      <news:publication>
        <news:name>24 AI</news:name>
        <news:language>en</news:language>
      </news:publication>
      <news:publication_date>2026-05-05T00:00:00Z</news:publication_date>
      <news:title>AWS SageMaker AI Gets Agentic Fine-Tuning Workflows with 9 Built-In Skills and Kiro and Claude Code Integration</news:title>
    </news:news>
  </url>
  <url>
    <loc>https://24-ai.news/en/news/2026-05-05/claude-code-v2-1-128-mcp-zip-fixes/</loc>
    <news:news>
      <news:publication>
        <news:name>24 AI</news:name>
        <news:language>en</news:language>
      </news:publication>
      <news:publication_date>2026-05-05T00:00:00Z</news:publication_date>
      <news:title>Anthropic Claude Code v2.1.128: 30+ Fixes, .zip Plugin Support and ~3× Lower cache_creation Cost for Sub-Agents</news:title>
    </news:news>
  </url>
  <url>
    <loc>https://24-ai.news/en/news/2026-05-05/cncf-github-actions-ci-security/</loc>
    <news:news>
      <news:publication>
        <news:name>24 AI</news:name>
        <news:language>en</news:language>
      </news:publication>
      <news:publication_date>2026-05-05T00:00:00Z</news:publication_date>
      <news:title>CNCF: immutable digest pinning, least-privilege tokens, and ephemeral runners — a recipe card for a more secure GitHub Actions pipeline</news:title>
    </news:news>
  </url>
  <url>
    <loc>https://24-ai.news/en/news/2026-05-05/ibm-think-2026-ai-operating-model/</loc>
    <news:news>
      <news:publication>
        <news:name>24 AI</news:name>
        <news:language>en</news:language>
      </news:publication>
      <news:publication_date>2026-05-05T00:00:00Z</news:publication_date>
      <news:title>IBM Think 2026: Krishna Presents AI Operating Model Built on 4 Pillars with watsonx Orchestrate, IBM Bob and Sovereign Core</news:title>
    </news:news>
  </url>
  <url>
    <loc>https://24-ai.news/en/news/2026-05-05/nist-caisi-deepseek-v4-pro-evaluation/</loc>
    <news:news>
      <news:publication>
        <news:name>24 AI</news:name>
        <news:language>en</news:language>
      </news:publication>
      <news:publication_date>2026-05-05T00:00:00Z</news:publication_date>
      <news:title>NIST CAISI: DeepSeek V4 Pro is the most capable Chinese AI model to date, but trails US frontier by 8 months</news:title>
    </news:news>
  </url>
  <url>
    <loc>https://24-ai.news/en/news/2026-05-05/nist-caisi-frontier-ai-testing-deepmind-msft-xai/</loc>
    <news:news>
      <news:publication>
        <news:name>24 AI</news:name>
        <news:language>en</news:language>
      </news:publication>
      <news:publication_date>2026-05-05T00:00:00Z</news:publication_date>
      <news:title>NIST CAISI Expands Frontier AI National Security Testing to Google DeepMind, Microsoft and xAI</news:title>
    </news:news>
  </url>
  <url>
    <loc>https://24-ai.news/de/news/2026-05-05/anthropic-enterprise-services-blackstone-goldman/</loc>
    <news:news>
      <news:publication>
        <news:name>24 AI</news:name>
        <news:language>de</news:language>
      </news:publication>
      <news:publication_date>2026-05-05T00:00:00Z</news:publication_date>
      <news:title>Anthropic gründet Enterprise-KI-Dienstleistungsunternehmen mit Blackstone, Hellman &amp; Friedman und Goldman Sachs für den Mid-Market</news:title>
    </news:news>
  </url>
  <url>
    <loc>https://24-ai.news/de/news/2026-05-05/arxiv-agentfloor-small-models-tools/</loc>
    <news:news>
      <news:publication>
        <news:name>24 AI</news:name>
        <news:language>de</news:language>
      </news:publication>
      <news:publication_date>2026-05-05T00:00:00Z</news:publication_date>
      <news:title>ArXiv AgentFloor: Kleine Open-Weight-Modelle (0,27B–32B) reichen für kurzfristige Agenten-Aufgaben aus; GPT-5 behält Vorteil nur bei langfristiger Planung</news:title>
    </news:news>
  </url>
  <url>
    <loc>https://24-ai.news/de/news/2026-05-05/arxiv-gui-sd-on-policy-self-distillation/</loc>
    <news:news>
      <news:publication>
        <news:name>24 AI</news:name>
        <news:language>de</news:language>
      </news:publication>
      <news:publication_date>2026-05-05T00:00:00Z</news:publication_date>
      <news:title>ArXiv GUI-SD: Erstes On-Policy-Self-Distillation-Framework für GUI-Grounding übertrifft GRPO auf sechs Benchmarks in Genauigkeit und Trainingseffizienz</news:title>
    </news:news>
  </url>
  <url>
    <loc>https://24-ai.news/de/news/2026-05-05/arxiv-long-horizon-llm-training-instability/</loc>
    <news:news>
      <news:publication>
        <news:name>24 AI</news:name>
        <news:language>de</news:language>
      </news:publication>
      <news:publication_date>2026-05-05T00:00:00Z</news:publication_date>
      <news:title>arXiv:2605.02572: Lange Horizonte destabilisieren das LLM-Training — ICML-2026-Paper schlägt „Horizon Generalization“ als Lösung vor</news:title>
    </news:news>
  </url>
  <url>
    <loc>https://24-ai.news/de/news/2026-05-05/arxiv-reclaim-medical-claims-foundation-model/</loc>
    <news:news>
      <news:publication>
        <news:name>24 AI</news:name>
        <news:language>de</news:language>
      </news:publication>
      <news:publication_date>2026-05-05T00:00:00Z</news:publication_date>
      <news:title>arXiv:2605.02740: ReClaim — Foundation-Modell auf 200 Millionen Patientenakten erreicht durchschnittlichen AUC-Wert von 75,6 % bei über 1000 medizinischen Aufgaben</news:title>
    </news:news>
  </url>
  <url>
    <loc>https://24-ai.news/de/news/2026-05-05/arxiv-saga-gpu-scheduling-agents/</loc>
    <news:news>
      <news:publication>
        <news:name>24 AI</news:name>
        <news:language>de</news:language>
      </news:publication>
      <news:publication_date>2026-05-05T00:00:00Z</news:publication_date>
      <news:title>ArXiv SAGA: Workflow-atomares GPU-Scheduling für KI-Agenten erreicht 1,64× schnellere Task-Completion auf 64-GPU-Cluster, angenommen auf HPDC 2026</news:title>
    </news:news>
  </url>
  <url>
    <loc>https://24-ai.news/de/news/2026-05-05/arxiv-token-arena-energy-benchmark/</loc>
    <news:news>
      <news:publication>
        <news:name>24 AI</news:name>
        <news:language>de</news:language>
      </news:publication>
      <news:publication_date>2026-05-05T00:00:00Z</news:publication_date>
      <news:title>ArXiv Token Arena: kontinuierlicher Benchmark für Energie und Kognition zeigt 6,2-fachen Unterschied in Joule pro korrekter Antwort zwischen Endpunkten</news:title>
    </news:news>
  </url>
  <url>
    <loc>https://24-ai.news/de/news/2026-05-05/arxiv-vlm-visual-jailbreak-icml-2026/</loc>
    <news:news>
      <news:publication>
        <news:name>24 AI</news:name>
        <news:language>de</news:language>
      </news:publication>
      <news:publication_date>2026-05-05T00:00:00Z</news:publication_date>
      <news:title>ArXiv: Visuelle Eingaben umgehen Sicherheitsfilter von Vision-Language-Modellen in 40,9 % der Fälle, zeigt ICML-2026-Studie</news:title>
    </news:news>
  </url>
  <url>
    <loc>https://24-ai.news/de/news/2026-05-05/aws-agentcore-optimization-preview/</loc>
    <news:news>
      <news:publication>
        <news:name>24 AI</news:name>
        <news:language>de</news:language>
      </news:publication>
      <news:publication_date>2026-05-05T00:00:00Z</news:publication_date>
      <news:title>AWS Bedrock AgentCore Optimization in der Vorschau: automatisierte Schleife von Produktions-Traces bis A/B-Tests via OpenTelemetry</news:title>
    </news:news>
  </url>
  <url>
    <loc>https://24-ai.news/de/news/2026-05-05/aws-sagemaker-agentic-fine-tuning-workflows/</loc>
    <news:news>
      <news:publication>
        <news:name>24 AI</news:name>
        <news:language>de</news:language>
      </news:publication>
      <news:publication_date>2026-05-05T00:00:00Z</news:publication_date>
      <news:title>AWS SageMaker AI erhält agentische Fine-Tuning-Workflows mit 9 integrierten Skills und Kiro- und Claude-Code-Integration</news:title>
    </news:news>
  </url>
  <url>
    <loc>https://24-ai.news/de/news/2026-05-05/claude-code-v2-1-128-mcp-zip-fixes/</loc>
    <news:news>
      <news:publication>
        <news:name>24 AI</news:name>
        <news:language>de</news:language>
      </news:publication>
      <news:publication_date>2026-05-05T00:00:00Z</news:publication_date>
      <news:title>Anthropic Claude Code v2.1.128: 30+ Korrekturen, .zip-Plugin-Unterstützung und ~3× niedrigere cache_creation-Kosten für Sub-Agenten</news:title>
    </news:news>
  </url>
  <url>
    <loc>https://24-ai.news/de/news/2026-05-05/cncf-github-actions-ci-security/</loc>
    <news:news>
      <news:publication>
        <news:name>24 AI</news:name>
        <news:language>de</news:language>
      </news:publication>
      <news:publication_date>2026-05-05T00:00:00Z</news:publication_date>
      <news:title>CNCF: Unveränderliches Digest-Pinning, Least-Privilege-Token und ephemere Runner — Rezeptkarte für sicherere GitHub-Actions-Pipelines</news:title>
    </news:news>
  </url>
  <url>
    <loc>https://24-ai.news/de/news/2026-05-05/ibm-think-2026-ai-operating-model/</loc>
    <news:news>
      <news:publication>
        <news:name>24 AI</news:name>
        <news:language>de</news:language>
      </news:publication>
      <news:publication_date>2026-05-05T00:00:00Z</news:publication_date>
      <news:title>IBM Think 2026: Krishna stellt KI-Betriebsmodell auf 4 Säulen mit watsonx Orchestrate, IBM Bob und Sovereign Core vor</news:title>
    </news:news>
  </url>
  <url>
    <loc>https://24-ai.news/de/news/2026-05-05/nist-caisi-deepseek-v4-pro-evaluation/</loc>
    <news:news>
      <news:publication>
        <news:name>24 AI</news:name>
        <news:language>de</news:language>
      </news:publication>
      <news:publication_date>2026-05-05T00:00:00Z</news:publication_date>
      <news:title>NIST CAISI: DeepSeek V4 Pro ist bisher fähigstes chinesisches KI-Modell, liegt aber 8 Monate hinter US-Frontier</news:title>
    </news:news>
  </url>
  <url>
    <loc>https://24-ai.news/de/news/2026-05-05/nist-caisi-frontier-ai-testing-deepmind-msft-xai/</loc>
    <news:news>
      <news:publication>
        <news:name>24 AI</news:name>
        <news:language>de</news:language>
      </news:publication>
      <news:publication_date>2026-05-05T00:00:00Z</news:publication_date>
      <news:title>NIST CAISI weitet Frontier-KI-Sicherheitstests auf Google DeepMind, Microsoft und xAI aus</news:title>
    </news:news>
  </url>
  <url>
    <loc>https://24-ai.news/zh/news/2026-05-05/anthropic-enterprise-services-blackstone-goldman/</loc>
    <news:news>
      <news:publication>
        <news:name>24 AI</news:name>
        <news:language>zh</news:language>
      </news:publication>
      <news:publication_date>2026-05-05T00:00:00Z</news:publication_date>
      <news:title>Anthropic联合Blackstone、Hellman &amp; Friedman和Goldman Sachs成立面向中端市场的企业AI服务公司</news:title>
    </news:news>
  </url>
  <url>
    <loc>https://24-ai.news/zh/news/2026-05-05/arxiv-agentfloor-small-models-tools/</loc>
    <news:news>
      <news:publication>
        <news:name>24 AI</news:name>
        <news:language>zh</news:language>
      </news:publication>
      <news:publication_date>2026-05-05T00:00:00Z</news:publication_date>
      <news:title>ArXiv AgentFloor：小型开放权重模型（0.27B-32B）能胜任短期智能体任务，GPT-5仅在长期规划上保持优势</news:title>
    </news:news>
  </url>
  <url>
    <loc>https://24-ai.news/zh/news/2026-05-05/arxiv-gui-sd-on-policy-self-distillation/</loc>
    <news:news>
      <news:publication>
        <news:name>24 AI</news:name>
        <news:language>zh</news:language>
      </news:publication>
      <news:publication_date>2026-05-05T00:00:00Z</news:publication_date>
      <news:title>ArXiv GUI-SD：首个面向GUI定位的在线自蒸馏框架，在六个基准上超越GRPO强化学习</news:title>
    </news:news>
  </url>
  <url>
    <loc>https://24-ai.news/zh/news/2026-05-05/arxiv-long-horizon-llm-training-instability/</loc>
    <news:news>
      <news:publication>
        <news:name>24 AI</news:name>
        <news:language>zh</news:language>
      </news:publication>
      <news:publication_date>2026-05-05T00:00:00Z</news:publication_date>
      <news:title>arXiv:2605.02572: 长时域使LLM训练不稳定 — ICML 2026论文提出“时域泛化”解决方案</news:title>
    </news:news>
  </url>
  <url>
    <loc>https://24-ai.news/zh/news/2026-05-05/arxiv-reclaim-medical-claims-foundation-model/</loc>
    <news:news>
      <news:publication>
        <news:name>24 AI</news:name>
        <news:language>zh</news:language>
      </news:publication>
      <news:publication_date>2026-05-05T00:00:00Z</news:publication_date>
      <news:title>arXiv:2605.02740: ReClaim — 基于2亿患者记录训练的基础模型在1000+医疗任务上达到平均AUC 75.6%</news:title>
    </news:news>
  </url>
  <url>
    <loc>https://24-ai.news/zh/news/2026-05-05/arxiv-saga-gpu-scheduling-agents/</loc>
    <news:news>
      <news:publication>
        <news:name>24 AI</news:name>
        <news:language>zh</news:language>
      </news:publication>
      <news:publication_date>2026-05-05T00:00:00Z</news:publication_date>
      <news:title>ArXiv SAGA：AI智能体的工作流原子化GPU调度在64-GPU集群上实现1.64倍任务完成提速，被HPDC 2026接收</news:title>
    </news:news>
  </url>
  <url>
    <loc>https://24-ai.news/zh/news/2026-05-05/arxiv-token-arena-energy-benchmark/</loc>
    <news:news>
      <news:publication>
        <news:name>24 AI</news:name>
        <news:language>zh</news:language>
      </news:publication>
      <news:publication_date>2026-05-05T00:00:00Z</news:publication_date>
      <news:title>ArXiv Token Arena：统一能耗与认知的持续基准，揭示端点间每正确答案能耗6.2倍差距</news:title>
    </news:news>
  </url>
  <url>
    <loc>https://24-ai.news/zh/news/2026-05-05/arxiv-vlm-visual-jailbreak-icml-2026/</loc>
    <news:news>
      <news:publication>
        <news:name>24 AI</news:name>
        <news:language>zh</news:language>
      </news:publication>
      <news:publication_date>2026-05-05T00:00:00Z</news:publication_date>
      <news:title>ArXiv：视觉图像以40.9%的成功率绕过视觉语言模型安全过滤器，ICML 2026论文揭示</news:title>
    </news:news>
  </url>
  <url>
    <loc>https://24-ai.news/zh/news/2026-05-05/aws-agentcore-optimization-preview/</loc>
    <news:news>
      <news:publication>
        <news:name>24 AI</news:name>
        <news:language>zh</news:language>
      </news:publication>
      <news:publication_date>2026-05-05T00:00:00Z</news:publication_date>
      <news:title>AWS Bedrock AgentCore Optimization进入预览：从生产追踪到A/B测试的自动化循环，基于OpenTelemetry追踪</news:title>
    </news:news>
  </url>
  <url>
    <loc>https://24-ai.news/zh/news/2026-05-05/aws-sagemaker-agentic-fine-tuning-workflows/</loc>
    <news:news>
      <news:publication>
        <news:name>24 AI</news:name>
        <news:language>zh</news:language>
      </news:publication>
      <news:publication_date>2026-05-05T00:00:00Z</news:publication_date>
      <news:title>AWS SageMaker AI 推出9项内置技能的智能体微调工作流，集成Kiro与Claude Code</news:title>
    </news:news>
  </url>
  <url>
    <loc>https://24-ai.news/zh/news/2026-05-05/claude-code-v2-1-128-mcp-zip-fixes/</loc>
    <news:news>
      <news:publication>
        <news:name>24 AI</news:name>
        <news:language>zh</news:language>
      </news:publication>
      <news:publication_date>2026-05-05T00:00:00Z</news:publication_date>
      <news:title>Anthropic Claude Code v2.1.128：30余项修复、.zip插件支持及子代理缓存创建成本降低约3倍</news:title>
    </news:news>
  </url>
  <url>
    <loc>https://24-ai.news/zh/news/2026-05-05/cncf-github-actions-ci-security/</loc>
    <news:news>
      <news:publication>
        <news:name>24 AI</news:name>
        <news:language>zh</news:language>
      </news:publication>
      <news:publication_date>2026-05-05T00:00:00Z</news:publication_date>
      <news:title>CNCF：固定到不可变摘要、最小权限令牌和临时运行器——更安全的GitHub Actions管道实践指南</news:title>
    </news:news>
  </url>
  <url>
    <loc>https://24-ai.news/zh/news/2026-05-05/ibm-think-2026-ai-operating-model/</loc>
    <news:news>
      <news:publication>
        <news:name>24 AI</news:name>
        <news:language>zh</news:language>
      </news:publication>
      <news:publication_date>2026-05-05T00:00:00Z</news:publication_date>
      <news:title>IBM Think 2026：Krishna发布基于4大支柱的AI操作模型，涵盖watsonx Orchestrate、IBM Bob与Sovereign Core</news:title>
    </news:news>
  </url>
  <url>
    <loc>https://24-ai.news/zh/news/2026-05-05/nist-caisi-deepseek-v4-pro-evaluation/</loc>
    <news:news>
      <news:publication>
        <news:name>24 AI</news:name>
        <news:language>zh</news:language>
      </news:publication>
      <news:publication_date>2026-05-05T00:00:00Z</news:publication_date>
      <news:title>NIST CAISI：DeepSeek V4 Pro是迄今最强中国AI模型，但落后美国前沿约8个月</news:title>
    </news:news>
  </url>
  <url>
    <loc>https://24-ai.news/zh/news/2026-05-05/nist-caisi-frontier-ai-testing-deepmind-msft-xai/</loc>
    <news:news>
      <news:publication>
        <news:name>24 AI</news:name>
        <news:language>zh</news:language>
      </news:publication>
      <news:publication_date>2026-05-05T00:00:00Z</news:publication_date>
      <news:title>NIST CAISI将前沿AI国家安全测试扩展至谷歌DeepMind、微软和xAI</news:title>
    </news:news>
  </url>
  <url>
    <loc>https://24-ai.news/ja/news/2026-05-05/anthropic-enterprise-services-blackstone-goldman/</loc>
    <news:news>
      <news:publication>
        <news:name>24 AI</news:name>
        <news:language>ja</news:language>
      </news:publication>
      <news:publication_date>2026-05-05T00:00:00Z</news:publication_date>
      <news:title>AnthropicがBlackstone、Hellman &amp; Friedman、Goldman Sachsと中堅市場向けエンタープライズAIサービス会社を設立</news:title>
    </news:news>
  </url>
  <url>
    <loc>https://24-ai.news/ja/news/2026-05-05/arxiv-agentfloor-small-models-tools/</loc>
    <news:news>
      <news:publication>
        <news:name>24 AI</news:name>
        <news:language>ja</news:language>
      </news:publication>
      <news:publication_date>2026-05-05T00:00:00Z</news:publication_date>
      <news:title>ArXiv AgentFloor：小型オープンウェイトモデル（0.27B-32B）が短期エージェントタスクに十分、GPT-5は長期計画のみで優位を維持</news:title>
    </news:news>
  </url>
  <url>
    <loc>https://24-ai.news/ja/news/2026-05-05/arxiv-gui-sd-on-policy-self-distillation/</loc>
    <news:news>
      <news:publication>
        <news:name>24 AI</news:name>
        <news:language>ja</news:language>
      </news:publication>
      <news:publication_date>2026-05-05T00:00:00Z</news:publication_date>
      <news:title>ArXiv GUI-SD：GUIグラウンディング向け初のオンポリシー自己蒸留フレームワーク、6つのベンチマークでGRPO強化学習を凌駕</news:title>
    </news:news>
  </url>
  <url>
    <loc>https://24-ai.news/ja/news/2026-05-05/arxiv-long-horizon-llm-training-instability/</loc>
    <news:news>
      <news:publication>
        <news:name>24 AI</news:name>
        <news:language>ja</news:language>
      </news:publication>
      <news:publication_date>2026-05-05T00:00:00Z</news:publication_date>
      <news:title>arXiv:2605.02572: 長いホライズンがLLM学習を不安定化 — ICML 2026論文が「ホライズン汎化」を解決策として提案</news:title>
    </news:news>
  </url>
  <url>
    <loc>https://24-ai.news/ja/news/2026-05-05/arxiv-reclaim-medical-claims-foundation-model/</loc>
    <news:news>
      <news:publication>
        <news:name>24 AI</news:name>
        <news:language>ja</news:language>
      </news:publication>
      <news:publication_date>2026-05-05T00:00:00Z</news:publication_date>
      <news:title>arXiv:2605.02740: ReClaim — 2億件の患者記録で学習した基盤モデルが1,000超の医療タスクで平均AUC 75.6%を達成</news:title>
    </news:news>
  </url>
  <url>
    <loc>https://24-ai.news/ja/news/2026-05-05/arxiv-saga-gpu-scheduling-agents/</loc>
    <news:news>
      <news:publication>
        <news:name>24 AI</news:name>
        <news:language>ja</news:language>
      </news:publication>
      <news:publication_date>2026-05-05T00:00:00Z</news:publication_date>
      <news:title>ArXiv SAGA：AIエージェント向けワークフロー原子化GPUスケジューリング、64-GPUクラスターでタスク完了を1.64倍高速化、HPDC 2026採択</news:title>
    </news:news>
  </url>
  <url>
    <loc>https://24-ai.news/ja/news/2026-05-05/arxiv-token-arena-energy-benchmark/</loc>
    <news:news>
      <news:publication>
        <news:name>24 AI</news:name>
        <news:language>ja</news:language>
      </news:publication>
      <news:publication_date>2026-05-05T00:00:00Z</news:publication_date>
      <news:title>ArXiv Token Arena：エネルギーと認知を統合する継続的ベンチマーク、エンドポイント間で正解あたりエネルギーの6.2倍の差を発見</news:title>
    </news:news>
  </url>
  <url>
    <loc>https://24-ai.news/ja/news/2026-05-05/arxiv-vlm-visual-jailbreak-icml-2026/</loc>
    <news:news>
      <news:publication>
        <news:name>24 AI</news:name>
        <news:language>ja</news:language>
      </news:publication>
      <news:publication_date>2026-05-05T00:00:00Z</news:publication_date>
      <news:title>ArXiv：視覚画像がVLMの安全フィルターを40.9%の確率で回避、ICML 2026論文が明らかに</news:title>
    </news:news>
  </url>
  <url>
    <loc>https://24-ai.news/ja/news/2026-05-05/aws-agentcore-optimization-preview/</loc>
    <news:news>
      <news:publication>
        <news:name>24 AI</news:name>
        <news:language>ja</news:language>
      </news:publication>
      <news:publication_date>2026-05-05T00:00:00Z</news:publication_date>
      <news:title>AWS Bedrock AgentCore Optimizationがプレビュー公開：OpenTelemetryトレースで本番環境からA/Bテストまでの自動化ループを実現</news:title>
    </news:news>
  </url>
  <url>
    <loc>https://24-ai.news/ja/news/2026-05-05/aws-sagemaker-agentic-fine-tuning-workflows/</loc>
    <news:news>
      <news:publication>
        <news:name>24 AI</news:name>
        <news:language>ja</news:language>
      </news:publication>
      <news:publication_date>2026-05-05T00:00:00Z</news:publication_date>
      <news:title>AWS SageMaker AI が9つのスキルを持つエージェント型ファインチューニングワークフローを導入、KiroとClaude Codeと統合</news:title>
    </news:news>
  </url>
  <url>
    <loc>https://24-ai.news/ja/news/2026-05-05/claude-code-v2-1-128-mcp-zip-fixes/</loc>
    <news:news>
      <news:publication>
        <news:name>24 AI</news:name>
        <news:language>ja</news:language>
      </news:publication>
      <news:publication_date>2026-05-05T00:00:00Z</news:publication_date>
      <news:title>Anthropic Claude Code v2.1.128：30件以上の修正、.zipプラグイン対応、サブエージェントのキャッシュ作成コストが約3分の1に</news:title>
    </news:news>
  </url>
  <url>
    <loc>https://24-ai.news/ja/news/2026-05-05/cncf-github-actions-ci-security/</loc>
    <news:news>
      <news:publication>
        <news:name>24 AI</news:name>
        <news:language>ja</news:language>
      </news:publication>
      <news:publication_date>2026-05-05T00:00:00Z</news:publication_date>
      <news:title>CNCF：不変ダイジェストへのピン留め、最小権限トークン、エフェメラルランナー——より安全なGitHub Actionsパイプラインへのレシピカード</news:title>
    </news:news>
  </url>
  <url>
    <loc>https://24-ai.news/ja/news/2026-05-05/ibm-think-2026-ai-operating-model/</loc>
    <news:news>
      <news:publication>
        <news:name>24 AI</news:name>
        <news:language>ja</news:language>
      </news:publication>
      <news:publication_date>2026-05-05T00:00:00Z</news:publication_date>
      <news:title>IBM Think 2026：Krishnaがwatsonx Orchestrate・IBM Bob・Sovereign Coreを柱とするAI Operating Modelを発表</news:title>
    </news:news>
  </url>
  <url>
    <loc>https://24-ai.news/ja/news/2026-05-05/nist-caisi-deepseek-v4-pro-evaluation/</loc>
    <news:news>
      <news:publication>
        <news:name>24 AI</news:name>
        <news:language>ja</news:language>
      </news:publication>
      <news:publication_date>2026-05-05T00:00:00Z</news:publication_date>
      <news:title>NIST CAISI：DeepSeek V4 Proはこれまで評価した中で最も優れた中国AIモデルだが、米国フロンティアに8ヶ月遅れ</news:title>
    </news:news>
  </url>
  <url>
    <loc>https://24-ai.news/ja/news/2026-05-05/nist-caisi-frontier-ai-testing-deepmind-msft-xai/</loc>
    <news:news>
      <news:publication>
        <news:name>24 AI</news:name>
        <news:language>ja</news:language>
      </news:publication>
      <news:publication_date>2026-05-05T00:00:00Z</news:publication_date>
      <news:title>NIST CAISIがフロンティアAI国家安全テストをGoogle DeepMind・Microsoft・xAIに拡大</news:title>
    </news:news>
  </url>
  <url>
    <loc>https://24-ai.news/ko/news/2026-05-05/anthropic-enterprise-services-blackstone-goldman/</loc>
    <news:news>
      <news:publication>
        <news:name>24 AI</news:name>
        <news:language>ko</news:language>
      </news:publication>
      <news:publication_date>2026-05-05T00:00:00Z</news:publication_date>
      <news:title>Anthropic, Blackstone·Hellman &amp; Friedman·Goldman Sachs와 중소기업 대상 엔터프라이즈 AI 서비스 회사 설립</news:title>
    </news:news>
  </url>
  <url>
    <loc>https://24-ai.news/ko/news/2026-05-05/arxiv-agentfloor-small-models-tools/</loc>
    <news:news>
      <news:publication>
        <news:name>24 AI</news:name>
        <news:language>ko</news:language>
      </news:publication>
      <news:publication_date>2026-05-05T00:00:00Z</news:publication_date>
      <news:title>ArXiv AgentFloor：소형 오픈웨이트 모델(0.27B-32B)이 단기 에이전트 작업에 충분, GPT-5는 장기 계획에서만 우위</news:title>
    </news:news>
  </url>
  <url>
    <loc>https://24-ai.news/ko/news/2026-05-05/arxiv-gui-sd-on-policy-self-distillation/</loc>
    <news:news>
      <news:publication>
        <news:name>24 AI</news:name>
        <news:language>ko</news:language>
      </news:publication>
      <news:publication_date>2026-05-05T00:00:00Z</news:publication_date>
      <news:title>ArXiv GUI-SD：GUI 그라운딩을 위한 최초의 온폴리시 자기 증류 프레임워크, 6개 벤치마크에서 GRPO 강화학습 능가</news:title>
    </news:news>
  </url>
  <url>
    <loc>https://24-ai.news/ko/news/2026-05-05/arxiv-long-horizon-llm-training-instability/</loc>
    <news:news>
      <news:publication>
        <news:name>24 AI</news:name>
        <news:language>ko</news:language>
      </news:publication>
      <news:publication_date>2026-05-05T00:00:00Z</news:publication_date>
      <news:title>arXiv:2605.02572: 긴 호라이즌이 LLM 학습을 불안정하게 만든다 — ICML 2026 논문, &apos;호라이즌 일반화&apos;를 해결책으로 제시</news:title>
    </news:news>
  </url>
  <url>
    <loc>https://24-ai.news/ko/news/2026-05-05/arxiv-reclaim-medical-claims-foundation-model/</loc>
    <news:news>
      <news:publication>
        <news:name>24 AI</news:name>
        <news:language>ko</news:language>
      </news:publication>
      <news:publication_date>2026-05-05T00:00:00Z</news:publication_date>
      <news:title>arXiv:2605.02740: ReClaim — 2억 건 환자 기록으로 학습한 파운데이션 모델, 1,000개 이상 의료 과제에서 평균 AUC 75.6% 달성</news:title>
    </news:news>
  </url>
  <url>
    <loc>https://24-ai.news/ko/news/2026-05-05/arxiv-saga-gpu-scheduling-agents/</loc>
    <news:news>
      <news:publication>
        <news:name>24 AI</news:name>
        <news:language>ko</news:language>
      </news:publication>
      <news:publication_date>2026-05-05T00:00:00Z</news:publication_date>
      <news:title>ArXiv SAGA：AI 에이전트를 위한 워크플로우 원자화 GPU 스케줄링, 64-GPU 클러스터에서 작업 완료 1.64배 단축, HPDC 2026 채택</news:title>
    </news:news>
  </url>
  <url>
    <loc>https://24-ai.news/ko/news/2026-05-05/arxiv-token-arena-energy-benchmark/</loc>
    <news:news>
      <news:publication>
        <news:name>24 AI</news:name>
        <news:language>ko</news:language>
      </news:publication>
      <news:publication_date>2026-05-05T00:00:00Z</news:publication_date>
      <news:title>ArXiv Token Arena：에너지와 인지를 통합한 지속적 벤치마크, 엔드포인트 간 정답당 에너지 6.2배 차이 발견</news:title>
    </news:news>
  </url>
  <url>
    <loc>https://24-ai.news/ko/news/2026-05-05/arxiv-vlm-visual-jailbreak-icml-2026/</loc>
    <news:news>
      <news:publication>
        <news:name>24 AI</news:name>
        <news:language>ko</news:language>
      </news:publication>
      <news:publication_date>2026-05-05T00:00:00Z</news:publication_date>
      <news:title>ArXiv：시각 이미지가 VLM 안전 필터를 40.9% 확률로 우회, ICML 2026 논문 공개</news:title>
    </news:news>
  </url>
  <url>
    <loc>https://24-ai.news/ko/news/2026-05-05/aws-agentcore-optimization-preview/</loc>
    <news:news>
      <news:publication>
        <news:name>24 AI</news:name>
        <news:language>ko</news:language>
      </news:publication>
      <news:publication_date>2026-05-05T00:00:00Z</news:publication_date>
      <news:title>AWS Bedrock AgentCore Optimization 프리뷰 출시：OpenTelemetry 트레이스로 생산에서 A/B 테스트까지 자동화 루프 구현</news:title>
    </news:news>
  </url>
  <url>
    <loc>https://24-ai.news/ko/news/2026-05-05/aws-sagemaker-agentic-fine-tuning-workflows/</loc>
    <news:news>
      <news:publication>
        <news:name>24 AI</news:name>
        <news:language>ko</news:language>
      </news:publication>
      <news:publication_date>2026-05-05T00:00:00Z</news:publication_date>
      <news:title>AWS SageMaker AI, 9개 내장 스킬 에이전트 파인튜닝 워크플로우 도입 — Kiro 및 Claude Code 통합</news:title>
    </news:news>
  </url>
  <url>
    <loc>https://24-ai.news/ko/news/2026-05-05/claude-code-v2-1-128-mcp-zip-fixes/</loc>
    <news:news>
      <news:publication>
        <news:name>24 AI</news:name>
        <news:language>ko</news:language>
      </news:publication>
      <news:publication_date>2026-05-05T00:00:00Z</news:publication_date>
      <news:title>Anthropic Claude Code v2.1.128: 30건 이상 수정, .zip 플러그인 지원, 서브에이전트 캐시 생성 비용 약 3배 절감</news:title>
    </news:news>
  </url>
  <url>
    <loc>https://24-ai.news/ko/news/2026-05-05/cncf-github-actions-ci-security/</loc>
    <news:news>
      <news:publication>
        <news:name>24 AI</news:name>
        <news:language>ko</news:language>
      </news:publication>
      <news:publication_date>2026-05-05T00:00:00Z</news:publication_date>
      <news:title>CNCF：불변 다이제스트 고정, 최소 권한 토큰, 임시 러너——더 안전한 GitHub Actions 파이프라인을 위한 레시피 카드</news:title>
    </news:news>
  </url>
  <url>
    <loc>https://24-ai.news/ko/news/2026-05-05/ibm-think-2026-ai-operating-model/</loc>
    <news:news>
      <news:publication>
        <news:name>24 AI</news:name>
        <news:language>ko</news:language>
      </news:publication>
      <news:publication_date>2026-05-05T00:00:00Z</news:publication_date>
      <news:title>IBM Think 2026: Krishna, 4대 기둥 기반 AI 운영 모델 발표 — watsonx Orchestrate·IBM Bob·Sovereign Core 공개</news:title>
    </news:news>
  </url>
  <url>
    <loc>https://24-ai.news/ko/news/2026-05-05/nist-caisi-deepseek-v4-pro-evaluation/</loc>
    <news:news>
      <news:publication>
        <news:name>24 AI</news:name>
        <news:language>ko</news:language>
      </news:publication>
      <news:publication_date>2026-05-05T00:00:00Z</news:publication_date>
      <news:title>NIST CAISI：DeepSeek V4 Pro, 지금까지 평가된 최강 중국 AI 모델이지만 미국 프런티어에 8개월 뒤처져</news:title>
    </news:news>
  </url>
  <url>
    <loc>https://24-ai.news/ko/news/2026-05-05/nist-caisi-frontier-ai-testing-deepmind-msft-xai/</loc>
    <news:news>
      <news:publication>
        <news:name>24 AI</news:name>
        <news:language>ko</news:language>
      </news:publication>
      <news:publication_date>2026-05-05T00:00:00Z</news:publication_date>
      <news:title>NIST CAISI, 프런티어 AI 국가 안보 테스트를 Google DeepMind·Microsoft·xAI로 확대</news:title>
    </news:news>
  </url>
  <url>
    <loc>https://24-ai.news/hr/news/2026-05-04/arxiv-adamezo-zeroth-order-llm-finetuning/</loc>
    <news:news>
      <news:publication>
        <news:name>24 AI</news:name>
        <news:language>hr</news:language>
      </news:publication>
      <news:publication_date>2026-05-04T00:00:00Z</news:publication_date>
      <news:title>AdaMeZO: fino ugađanje LLM-ova Adam-stilom bez pohrane momenata u GPU memoriji</news:title>
    </news:news>
  </url>
  <url>
    <loc>https://24-ai.news/hr/news/2026-05-04/arxiv-aem-adaptive-entropy-modulation-rl-agenti/</loc>
    <news:news>
      <news:publication>
        <news:name>24 AI</news:name>
        <news:language>hr</news:language>
      </news:publication>
      <news:publication_date>2026-05-04T00:00:00Z</news:publication_date>
      <news:title>ArXiv AEM: adaptivna modulacija entropije za multi-turn RL agente postiže +1,4 % na SWE-bench Verified</news:title>
    </news:news>
  </url>
  <url>
    <loc>https://24-ai.news/hr/news/2026-05-04/arxiv-armor-2025-vojni-llm-sigurnost/</loc>
    <news:news>
      <news:publication>
        <news:name>24 AI</news:name>
        <news:language>hr</news:language>
      </news:publication>
      <news:publication_date>2026-05-04T00:00:00Z</news:publication_date>
      <news:title>ArXiv ARMOR 2025: prvi vojni benchmark za LLM sigurnost s 519 promptova kroz 21 komercijalni model</news:title>
    </news:news>
  </url>
  <url>
    <loc>https://24-ai.news/hr/news/2026-05-04/arxiv-bayes-consistent-agentic-orchestration-icml/</loc>
    <news:news>
      <news:publication>
        <news:name>24 AI</news:name>
        <news:language>hr</news:language>
      </news:publication>
      <news:publication_date>2026-05-04T00:00:00Z</news:publication_date>
      <news:title>Position paper s 30 autora na ICML 2026: orkestracija agentnih AI sustava mora biti Bayes-konzistentna</news:title>
    </news:news>
  </url>
  <url>
    <loc>https://24-ai.news/hr/news/2026-05-04/arxiv-bwla-w1ax-kvantizacija-llm/</loc>
    <news:news>
      <news:publication>
        <news:name>24 AI</news:name>
        <news:language>hr</news:language>
      </news:publication>
      <news:publication_date>2026-05-04T00:00:00Z</news:publication_date>
      <news:title>BWLA: 1-bitna kvantizacija LLM-ova s 3,26× ubrzanjem i 70% boljim rezultatima (ACL 2026)</news:title>
    </news:news>
  </url>
  <url>
    <loc>https://24-ai.news/hr/news/2026-05-04/arxiv-stable-gflownet-llm-red-teaming-icml/</loc>
    <news:news>
      <news:publication>
        <news:name>24 AI</news:name>
        <news:language>hr</news:language>
      </news:publication>
      <news:publication_date>2026-05-04T00:00:00Z</news:publication_date>
      <news:title>ICML 2026 Spotlight: Stable-GFlowNet uvodi stabilnije i raznovrsnije automatizirano red-teamanje LLM-ova</news:title>
    </news:news>
  </url>
  <url>
    <loc>https://24-ai.news/hr/news/2026-05-04/arxiv-tool-calling-framework-llm-agenti/</loc>
    <news:news>
      <news:publication>
        <news:name>24 AI</news:name>
        <news:language>hr</news:language>
      </news:publication>
      <news:publication_date>2026-05-04T00:00:00Z</news:publication_date>
      <news:title>ArXiv okvir &apos;To Call or Not to Call&apos; otkriva da LLM-ovi pogrešno procjenjuju kad im trebaju vanjski alati</news:title>
    </news:news>
  </url>
  <url>
    <loc>https://24-ai.news/hr/news/2026-05-04/arxiv-tool-use-tax-llm-agenti/</loc>
    <news:news>
      <news:publication>
        <news:name>24 AI</news:name>
        <news:language>hr</news:language>
      </news:publication>
      <news:publication_date>2026-05-04T00:00:00Z</news:publication_date>
      <news:title>ArXiv: skriveni trošak alata u LLM agentima — &quot;tool-use tax&quot; smanjuje točnost čak i kad alati pomažu</news:title>
    </news:news>
  </url>
  <url>
    <loc>https://24-ai.news/hr/news/2026-05-04/ibm-ceo-study-c-suite-ai-restrukturiranje/</loc>
    <news:news>
      <news:publication>
        <news:name>24 AI</news:name>
        <news:language>hr</news:language>
      </news:publication>
      <news:publication_date>2026-05-04T00:00:00Z</news:publication_date>
      <news:title>IBM studija: 76 % organizacija ima Chief AI Officera, CEO-i očekuju 48 % autonomnih AI odluka do 2030.</news:title>
    </news:news>
  </url>
  <url>
    <loc>https://24-ai.news/en/news/2026-05-04/arxiv-adamezo-zeroth-order-llm-finetuning/</loc>
    <news:news>
      <news:publication>
        <news:name>24 AI</news:name>
        <news:language>en</news:language>
      </news:publication>
      <news:publication_date>2026-05-04T00:00:00Z</news:publication_date>
      <news:title>AdaMeZO: Adam-style LLM fine-tuning without storing gradient moments in GPU memory</news:title>
    </news:news>
  </url>
  <url>
    <loc>https://24-ai.news/en/news/2026-05-04/arxiv-aem-adaptive-entropy-modulation-rl-agenti/</loc>
    <news:news>
      <news:publication>
        <news:name>24 AI</news:name>
        <news:language>en</news:language>
      </news:publication>
      <news:publication_date>2026-05-04T00:00:00Z</news:publication_date>
      <news:title>ArXiv AEM: Adaptive Entropy Modulation for multi-turn RL agents achieves +1.4% on SWE-bench Verified</news:title>
    </news:news>
  </url>
  <url>
    <loc>https://24-ai.news/en/news/2026-05-04/arxiv-armor-2025-vojni-llm-sigurnost/</loc>
    <news:news>
      <news:publication>
        <news:name>24 AI</news:name>
        <news:language>en</news:language>
      </news:publication>
      <news:publication_date>2026-05-04T00:00:00Z</news:publication_date>
      <news:title>ArXiv ARMOR 2025: first military LLM safety benchmark with 519 prompts across 21 commercial models</news:title>
    </news:news>
  </url>
  <url>
    <loc>https://24-ai.news/en/news/2026-05-04/arxiv-bayes-consistent-agentic-orchestration-icml/</loc>
    <news:news>
      <news:publication>
        <news:name>24 AI</news:name>
        <news:language>en</news:language>
      </news:publication>
      <news:publication_date>2026-05-04T00:00:00Z</news:publication_date>
      <news:title>Position paper by 30 authors at ICML 2026: agentic AI orchestration must be Bayes-consistent</news:title>
    </news:news>
  </url>
  <url>
    <loc>https://24-ai.news/en/news/2026-05-04/arxiv-bwla-w1ax-kvantizacija-llm/</loc>
    <news:news>
      <news:publication>
        <news:name>24 AI</news:name>
        <news:language>en</news:language>
      </news:publication>
      <news:publication_date>2026-05-04T00:00:00Z</news:publication_date>
      <news:title>BWLA: 1-bit LLM quantization with 3.26× speedup and 70% better results (ACL 2026)</news:title>
    </news:news>
  </url>
  <url>
    <loc>https://24-ai.news/en/news/2026-05-04/arxiv-stable-gflownet-llm-red-teaming-icml/</loc>
    <news:news>
      <news:publication>
        <news:name>24 AI</news:name>
        <news:language>en</news:language>
      </news:publication>
      <news:publication_date>2026-05-04T00:00:00Z</news:publication_date>
      <news:title>ICML 2026 Spotlight: Stable-GFlowNet introduces more stable and diverse automated LLM red-teaming</news:title>
    </news:news>
  </url>
  <url>
    <loc>https://24-ai.news/en/news/2026-05-04/arxiv-tool-calling-framework-llm-agenti/</loc>
    <news:news>
      <news:publication>
        <news:name>24 AI</news:name>
        <news:language>en</news:language>
      </news:publication>
      <news:publication_date>2026-05-04T00:00:00Z</news:publication_date>
      <news:title>ArXiv &apos;To Call or Not to Call&apos; framework reveals LLMs misjudge when they need external tools</news:title>
    </news:news>
  </url>
  <url>
    <loc>https://24-ai.news/en/news/2026-05-04/arxiv-tool-use-tax-llm-agenti/</loc>
    <news:news>
      <news:publication>
        <news:name>24 AI</news:name>
        <news:language>en</news:language>
      </news:publication>
      <news:publication_date>2026-05-04T00:00:00Z</news:publication_date>
      <news:title>ArXiv: the hidden cost of tools in LLM agents — &apos;tool-use tax&apos; reduces accuracy even when tools help</news:title>
    </news:news>
  </url>
  <url>
    <loc>https://24-ai.news/en/news/2026-05-04/ibm-ceo-study-c-suite-ai-restrukturiranje/</loc>
    <news:news>
      <news:publication>
        <news:name>24 AI</news:name>
        <news:language>en</news:language>
      </news:publication>
      <news:publication_date>2026-05-04T00:00:00Z</news:publication_date>
      <news:title>IBM study: 76% of organizations have a Chief AI Officer, CEOs expect 48% autonomous AI decisions by 2030</news:title>
    </news:news>
  </url>
  <url>
    <loc>https://24-ai.news/de/news/2026-05-04/arxiv-adamezo-zeroth-order-llm-finetuning/</loc>
    <news:news>
      <news:publication>
        <news:name>24 AI</news:name>
        <news:language>de</news:language>
      </news:publication>
      <news:publication_date>2026-05-04T00:00:00Z</news:publication_date>
      <news:title>AdaMeZO: Adam-Stil LLM-Fine-Tuning ohne Speicherung von Gradientenmomenten im GPU-Speicher</news:title>
    </news:news>
  </url>
  <url>
    <loc>https://24-ai.news/de/news/2026-05-04/arxiv-aem-adaptive-entropy-modulation-rl-agenti/</loc>
    <news:news>
      <news:publication>
        <news:name>24 AI</news:name>
        <news:language>de</news:language>
      </news:publication>
      <news:publication_date>2026-05-04T00:00:00Z</news:publication_date>
      <news:title>ArXiv AEM: Adaptive Entropiemodulation für Multi-Turn-RL-Agenten erreicht +1,4 % auf SWE-bench Verified</news:title>
    </news:news>
  </url>
  <url>
    <loc>https://24-ai.news/de/news/2026-05-04/arxiv-armor-2025-vojni-llm-sigurnost/</loc>
    <news:news>
      <news:publication>
        <news:name>24 AI</news:name>
        <news:language>de</news:language>
      </news:publication>
      <news:publication_date>2026-05-04T00:00:00Z</news:publication_date>
      <news:title>ArXiv ARMOR 2025: erster militärischer LLM-Sicherheitsbenchmark mit 519 Prompts über 21 kommerzielle Modelle</news:title>
    </news:news>
  </url>
  <url>
    <loc>https://24-ai.news/de/news/2026-05-04/arxiv-bayes-consistent-agentic-orchestration-icml/</loc>
    <news:news>
      <news:publication>
        <news:name>24 AI</news:name>
        <news:language>de</news:language>
      </news:publication>
      <news:publication_date>2026-05-04T00:00:00Z</news:publication_date>
      <news:title>Position Paper von 30 Autoren auf ICML 2026: Orchestrierung agentischer KI-Systeme muss Bayes-konsistent sein</news:title>
    </news:news>
  </url>
  <url>
    <loc>https://24-ai.news/de/news/2026-05-04/arxiv-bwla-w1ax-kvantizacija-llm/</loc>
    <news:news>
      <news:publication>
        <news:name>24 AI</news:name>
        <news:language>de</news:language>
      </news:publication>
      <news:publication_date>2026-05-04T00:00:00Z</news:publication_date>
      <news:title>BWLA: 1-Bit-Quantisierung von Sprachmodellen mit 3,26-facher Beschleunigung und 70 % besseren Ergebnissen (ACL 2026)</news:title>
    </news:news>
  </url>
  <url>
    <loc>https://24-ai.news/de/news/2026-05-04/arxiv-stable-gflownet-llm-red-teaming-icml/</loc>
    <news:news>
      <news:publication>
        <news:name>24 AI</news:name>
        <news:language>de</news:language>
      </news:publication>
      <news:publication_date>2026-05-04T00:00:00Z</news:publication_date>
      <news:title>ICML 2026 Spotlight: Stable-GFlowNet führt stabileres und vielfältigeres automatisiertes Red-Teaming von Sprachmodellen ein</news:title>
    </news:news>
  </url>
  <url>
    <loc>https://24-ai.news/de/news/2026-05-04/arxiv-tool-calling-framework-llm-agenti/</loc>
    <news:news>
      <news:publication>
        <news:name>24 AI</news:name>
        <news:language>de</news:language>
      </news:publication>
      <news:publication_date>2026-05-04T00:00:00Z</news:publication_date>
      <news:title>ArXiv-Rahmen &apos;To Call or Not to Call&apos; zeigt: Sprachmodelle beurteilen falsch, wann sie externe Werkzeuge brauchen</news:title>
    </news:news>
  </url>
  <url>
    <loc>https://24-ai.news/de/news/2026-05-04/arxiv-tool-use-tax-llm-agenti/</loc>
    <news:news>
      <news:publication>
        <news:name>24 AI</news:name>
        <news:language>de</news:language>
      </news:publication>
      <news:publication_date>2026-05-04T00:00:00Z</news:publication_date>
      <news:title>ArXiv: die versteckten Kosten von Werkzeugen in LLM-Agenten — &apos;Tool-Use Tax&apos; senkt Genauigkeit selbst wenn Werkzeuge helfen</news:title>
    </news:news>
  </url>
  <url>
    <loc>https://24-ai.news/de/news/2026-05-04/ibm-ceo-study-c-suite-ai-restrukturiranje/</loc>
    <news:news>
      <news:publication>
        <news:name>24 AI</news:name>
        <news:language>de</news:language>
      </news:publication>
      <news:publication_date>2026-05-04T00:00:00Z</news:publication_date>
      <news:title>IBM-Studie: 76 % der Unternehmen haben einen Chief AI Officer, CEOs erwarten 48 % autonome KI-Entscheidungen bis 2030</news:title>
    </news:news>
  </url>
  <url>
    <loc>https://24-ai.news/zh/news/2026-05-04/arxiv-adamezo-zeroth-order-llm-finetuning/</loc>
    <news:news>
      <news:publication>
        <news:name>24 AI</news:name>
        <news:language>zh</news:language>
      </news:publication>
      <news:publication_date>2026-05-04T00:00:00Z</news:publication_date>
      <news:title>AdaMeZO：以类Adam方式微调LLM，无需在GPU内存中存储动量</news:title>
    </news:news>
  </url>
  <url>
    <loc>https://24-ai.news/zh/news/2026-05-04/arxiv-aem-adaptive-entropy-modulation-rl-agenti/</loc>
    <news:news>
      <news:publication>
        <news:name>24 AI</news:name>
        <news:language>zh</news:language>
      </news:publication>
      <news:publication_date>2026-05-04T00:00:00Z</news:publication_date>
      <news:title>ArXiv AEM：多轮RL智能体的自适应熵调制在SWE-bench Verified上提升+1.4%</news:title>
    </news:news>
  </url>
  <url>
    <loc>https://24-ai.news/zh/news/2026-05-04/arxiv-armor-2025-vojni-llm-sigurnost/</loc>
    <news:news>
      <news:publication>
        <news:name>24 AI</news:name>
        <news:language>zh</news:language>
      </news:publication>
      <news:publication_date>2026-05-04T00:00:00Z</news:publication_date>
      <news:title>ArXiv ARMOR 2025：519个提示词测试21个商业LLM的军事安全性基准</news:title>
    </news:news>
  </url>
  <url>
    <loc>https://24-ai.news/zh/news/2026-05-04/arxiv-bayes-consistent-agentic-orchestration-icml/</loc>
    <news:news>
      <news:publication>
        <news:name>24 AI</news:name>
        <news:language>zh</news:language>
      </news:publication>
      <news:publication_date>2026-05-04T00:00:00Z</news:publication_date>
      <news:title>ICML 2026 立场论文：30位作者认为智能体AI系统的编排必须符合贝叶斯一致性</news:title>
    </news:news>
  </url>
  <url>
    <loc>https://24-ai.news/zh/news/2026-05-04/arxiv-bwla-w1ax-kvantizacija-llm/</loc>
    <news:news>
      <news:publication>
        <news:name>24 AI</news:name>
        <news:language>zh</news:language>
      </news:publication>
      <news:publication_date>2026-05-04T00:00:00Z</news:publication_date>
      <news:title>BWLA：1位量化LLM实现3.26倍加速和70%更好结果（ACL 2026）</news:title>
    </news:news>
  </url>
  <url>
    <loc>https://24-ai.news/zh/news/2026-05-04/arxiv-stable-gflownet-llm-red-teaming-icml/</loc>
    <news:news>
      <news:publication>
        <news:name>24 AI</news:name>
        <news:language>zh</news:language>
      </news:publication>
      <news:publication_date>2026-05-04T00:00:00Z</news:publication_date>
      <news:title>ICML 2026 Spotlight：Stable-GFlowNet引入更稳定、更多样化的LLM自动化红队测试</news:title>
    </news:news>
  </url>
  <url>
    <loc>https://24-ai.news/zh/news/2026-05-04/arxiv-tool-calling-framework-llm-agenti/</loc>
    <news:news>
      <news:publication>
        <news:name>24 AI</news:name>
        <news:language>zh</news:language>
      </news:publication>
      <news:publication_date>2026-05-04T00:00:00Z</news:publication_date>
      <news:title>ArXiv框架「是否调用」揭示LLM错误判断何时需要外部工具</news:title>
    </news:news>
  </url>
  <url>
    <loc>https://24-ai.news/zh/news/2026-05-04/arxiv-tool-use-tax-llm-agenti/</loc>
    <news:news>
      <news:publication>
        <news:name>24 AI</news:name>
        <news:language>zh</news:language>
      </news:publication>
      <news:publication_date>2026-05-04T00:00:00Z</news:publication_date>
      <news:title>ArXiv：LLM智能体工具的隐性成本——「工具使用税」即使工具有帮助也会降低准确性</news:title>
    </news:news>
  </url>
  <url>
    <loc>https://24-ai.news/zh/news/2026-05-04/ibm-ceo-study-c-suite-ai-restrukturiranje/</loc>
    <news:news>
      <news:publication>
        <news:name>24 AI</news:name>
        <news:language>zh</news:language>
      </news:publication>
      <news:publication_date>2026-05-04T00:00:00Z</news:publication_date>
      <news:title>IBM研究：76%的组织有首席AI官，CEO预计到2030年48%的运营决策将由AI自主作出</news:title>
    </news:news>
  </url>
  <url>
    <loc>https://24-ai.news/ja/news/2026-05-04/arxiv-adamezo-zeroth-order-llm-finetuning/</loc>
    <news:news>
      <news:publication>
        <news:name>24 AI</news:name>
        <news:language>ja</news:language>
      </news:publication>
      <news:publication_date>2026-05-04T00:00:00Z</news:publication_date>
      <news:title>AdaMeZO：GPU メモリにモーメントを保存せずAdam方式でLLMをファインチューニング</news:title>
    </news:news>
  </url>
  <url>
    <loc>https://24-ai.news/ja/news/2026-05-04/arxiv-aem-adaptive-entropy-modulation-rl-agenti/</loc>
    <news:news>
      <news:publication>
        <news:name>24 AI</news:name>
        <news:language>ja</news:language>
      </news:publication>
      <news:publication_date>2026-05-04T00:00:00Z</news:publication_date>
      <news:title>ArXiv AEM：マルチターンRL エージェントの適応的エントロピー変調がSWE-bench Verifiedで+1.4%を達成</news:title>
    </news:news>
  </url>
  <url>
    <loc>https://24-ai.news/ja/news/2026-05-04/arxiv-armor-2025-vojni-llm-sigurnost/</loc>
    <news:news>
      <news:publication>
        <news:name>24 AI</news:name>
        <news:language>ja</news:language>
      </news:publication>
      <news:publication_date>2026-05-04T00:00:00Z</news:publication_date>
      <news:title>ArXiv ARMOR 2025：519の軍事プロンプトで21の商用LLMの安全性を評価する初の軍事ベンチマーク</news:title>
    </news:news>
  </url>
  <url>
    <loc>https://24-ai.news/ja/news/2026-05-04/arxiv-bayes-consistent-agentic-orchestration-icml/</loc>
    <news:news>
      <news:publication>
        <news:name>24 AI</news:name>
        <news:language>ja</news:language>
      </news:publication>
      <news:publication_date>2026-05-04T00:00:00Z</news:publication_date>
      <news:title>ICML 2026立場論文：30名の著者がエージェントAIのオーケストレーションはベイズ一貫性を持つべきと主張</news:title>
    </news:news>
  </url>
  <url>
    <loc>https://24-ai.news/ja/news/2026-05-04/arxiv-bwla-w1ax-kvantizacija-llm/</loc>
    <news:news>
      <news:publication>
        <news:name>24 AI</news:name>
        <news:language>ja</news:language>
      </news:publication>
      <news:publication_date>2026-05-04T00:00:00Z</news:publication_date>
      <news:title>BWLA：1ビット量子化LLMで3.26倍の高速化と70%の改善を達成（ACL 2026）</news:title>
    </news:news>
  </url>
  <url>
    <loc>https://24-ai.news/ja/news/2026-05-04/arxiv-stable-gflownet-llm-red-teaming-icml/</loc>
    <news:news>
      <news:publication>
        <news:name>24 AI</news:name>
        <news:language>ja</news:language>
      </news:publication>
      <news:publication_date>2026-05-04T00:00:00Z</news:publication_date>
      <news:title>ICML 2026 Spotlight：Stable-GFlowNetがより安定した多様なLLM自動レッドチーミングを実現</news:title>
    </news:news>
  </url>
  <url>
    <loc>https://24-ai.news/ja/news/2026-05-04/arxiv-tool-calling-framework-llm-agenti/</loc>
    <news:news>
      <news:publication>
        <news:name>24 AI</news:name>
        <news:language>ja</news:language>
      </news:publication>
      <news:publication_date>2026-05-04T00:00:00Z</news:publication_date>
      <news:title>ArXivフレームワーク「呼ぶべきか否か」がLLMの外部ツール判断ミスを明らかに</news:title>
    </news:news>
  </url>
  <url>
    <loc>https://24-ai.news/ja/news/2026-05-04/arxiv-tool-use-tax-llm-agenti/</loc>
    <news:news>
      <news:publication>
        <news:name>24 AI</news:name>
        <news:language>ja</news:language>
      </news:publication>
      <news:publication_date>2026-05-04T00:00:00Z</news:publication_date>
      <news:title>ArXiv：LLMエージェントのツールの隠れたコスト——「ツール使用税」はツールが役立つ時でも精度を下げる</news:title>
    </news:news>
  </url>
  <url>
    <loc>https://24-ai.news/ja/news/2026-05-04/ibm-ceo-study-c-suite-ai-restrukturiranje/</loc>
    <news:news>
      <news:publication>
        <news:name>24 AI</news:name>
        <news:language>ja</news:language>
      </news:publication>
      <news:publication_date>2026-05-04T00:00:00Z</news:publication_date>
      <news:title>IBM調査：76%の組織がChief AI Officerを設置、CEOは2030年までにAIが48%の運営決定を自律的に下すと予測</news:title>
    </news:news>
  </url>
  <url>
    <loc>https://24-ai.news/ko/news/2026-05-04/arxiv-adamezo-zeroth-order-llm-finetuning/</loc>
    <news:news>
      <news:publication>
        <news:name>24 AI</news:name>
        <news:language>ko</news:language>
      </news:publication>
      <news:publication_date>2026-05-04T00:00:00Z</news:publication_date>
      <news:title>AdaMeZO: GPU 메모리에 모멘트를 저장하지 않고 Adam 방식으로 LLM 파인튜닝하는 새 최적화기</news:title>
    </news:news>
  </url>
  <url>
    <loc>https://24-ai.news/ko/news/2026-05-04/arxiv-aem-adaptive-entropy-modulation-rl-agenti/</loc>
    <news:news>
      <news:publication>
        <news:name>24 AI</news:name>
        <news:language>ko</news:language>
      </news:publication>
      <news:publication_date>2026-05-04T00:00:00Z</news:publication_date>
      <news:title>ArXiv AEM: 멀티턴 RL 에이전트를 위한 적응형 엔트로피 변조, SWE-bench Verified에서 +1.4% 향상</news:title>
    </news:news>
  </url>
  <url>
    <loc>https://24-ai.news/ko/news/2026-05-04/arxiv-armor-2025-vojni-llm-sigurnost/</loc>
    <news:news>
      <news:publication>
        <news:name>24 AI</news:name>
        <news:language>ko</news:language>
      </news:publication>
      <news:publication_date>2026-05-04T00:00:00Z</news:publication_date>
      <news:title>ArXiv ARMOR 2025: 519개 프롬프트로 21개 상용 LLM의 군사 안전성을 평가한 최초의 군사 벤치마크</news:title>
    </news:news>
  </url>
  <url>
    <loc>https://24-ai.news/ko/news/2026-05-04/arxiv-bayes-consistent-agentic-orchestration-icml/</loc>
    <news:news>
      <news:publication>
        <news:name>24 AI</news:name>
        <news:language>ko</news:language>
      </news:publication>
      <news:publication_date>2026-05-04T00:00:00Z</news:publication_date>
      <news:title>ICML 2026 포지션 페이퍼: 저자 30명, 에이전트 AI 오케스트레이션은 베이즈 일관성을 가져야 한다</news:title>
    </news:news>
  </url>
  <url>
    <loc>https://24-ai.news/ko/news/2026-05-04/arxiv-bwla-w1ax-kvantizacija-llm/</loc>
    <news:news>
      <news:publication>
        <news:name>24 AI</news:name>
        <news:language>ko</news:language>
      </news:publication>
      <news:publication_date>2026-05-04T00:00:00Z</news:publication_date>
      <news:title>BWLA: 1비트 양자화 LLM으로 3.26배 가속 및 70% 향상 달성 (ACL 2026)</news:title>
    </news:news>
  </url>
  <url>
    <loc>https://24-ai.news/ko/news/2026-05-04/arxiv-stable-gflownet-llm-red-teaming-icml/</loc>
    <news:news>
      <news:publication>
        <news:name>24 AI</news:name>
        <news:language>ko</news:language>
      </news:publication>
      <news:publication_date>2026-05-04T00:00:00Z</news:publication_date>
      <news:title>ICML 2026 Spotlight: Stable-GFlowNet, 더 안정적이고 다양한 LLM 자동화 레드팀 테스트 도입</news:title>
    </news:news>
  </url>
  <url>
    <loc>https://24-ai.news/ko/news/2026-05-04/arxiv-tool-calling-framework-llm-agenti/</loc>
    <news:news>
      <news:publication>
        <news:name>24 AI</news:name>
        <news:language>ko</news:language>
      </news:publication>
      <news:publication_date>2026-05-04T00:00:00Z</news:publication_date>
      <news:title>ArXiv 프레임워크 &apos;호출할 것인가 말 것인가&apos;: LLM이 외부 도구 필요성을 잘못 판단한다는 것을 밝혀</news:title>
    </news:news>
  </url>
  <url>
    <loc>https://24-ai.news/ko/news/2026-05-04/arxiv-tool-use-tax-llm-agenti/</loc>
    <news:news>
      <news:publication>
        <news:name>24 AI</news:name>
        <news:language>ko</news:language>
      </news:publication>
      <news:publication_date>2026-05-04T00:00:00Z</news:publication_date>
      <news:title>ArXiv: LLM 에이전트 도구의 숨겨진 비용 - &apos;도구 사용세&apos;는 도구가 도움이 될 때도 정확도를 낮춘다</news:title>
    </news:news>
  </url>
  <url>
    <loc>https://24-ai.news/ko/news/2026-05-04/ibm-ceo-study-c-suite-ai-restrukturiranje/</loc>
    <news:news>
      <news:publication>
        <news:name>24 AI</news:name>
        <news:language>ko</news:language>
      </news:publication>
      <news:publication_date>2026-05-04T00:00:00Z</news:publication_date>
      <news:title>IBM 연구: 조직의 76%가 최고AI책임자를 보유, CEO들은 2030년까지 AI가 운영 결정의 48%를 자율적으로 내릴 것으로 예상</news:title>
    </news:news>
  </url>
</urlset>
