
Key Code Verification

Core Verification Logic

Boundary-Testing Framework Design

To probe the capability boundaries of LLMs systematically, we need a multi-level verification framework. Beyond automated tests, the framework also covers human evaluation and validation against real-world scenarios. The core verification logic follows:

```python
import time
from typing import List, Dict, Any, Callable
from dataclasses import dataclass
from enum import Enum

class CapabilityDimension(Enum):
    """能力维度枚举"""
    REASONING = "reasoning"
    KNOWLEDGE = "knowledge"
    MEMORY = "memory"
    TOOL_USE = "tool_use"
    CREATIVITY = "creativity"
    SAFETY = "safety"

@dataclass
class TestCase:
    """测试用例数据结构"""
    name: str
    description: str
    dimension: CapabilityDimension
    input_data: Any
    expected_output: Any
    evaluation_criteria: Callable[[Any, Any], float]
    difficulty_level: str  # easy, medium, hard, extreme

@dataclass
class TestResult:
    """测试结果数据结构"""
    test_case: TestCase
    model_output: Any
    score: float  # 0.0 to 1.0
    latency_ms: float
    token_count: int
    metadata: Dict[str, Any]

class LLMBoundaryTester:
    """LLM边界测试器核心类"""

    def __init__(self, model_api: Callable[[str], str]):
        """
        初始化测试器

        Args:
            model_api: 模型API调用函数,接收prompt,返回output
        """
        self.model_api = model_api
        self.test_suites: Dict[CapabilityDimension, List[TestCase]] = {}

    def add_test_case(self, test_case: TestCase):
        """添加测试用例"""
        if test_case.dimension not in self.test_suites:
            self.test_suites[test_case.dimension] = []
        self.test_suites[test_case.dimension].append(test_case)

    def run_test(self, test_case: TestCase) -> TestResult:
        """
        运行单个测试用例

        Args:
            test_case: 测试用例

        Returns:
            TestResult: 测试结果
        """
        start_time = time.time()
        model_output = self.model_api(test_case.input_data)
        end_time = time.time()

        latency_ms = (end_time - start_time) * 1000
        score = test_case.evaluation_criteria(model_output, test_case.expected_output)

        return TestResult(
            test_case=test_case,
            model_output=model_output,
            score=score,
            latency_ms=latency_ms,
            token_count=len(model_output.split()),  # rough whitespace-token approximation
            metadata={}
        )

    def run_dimension_tests(self, dimension: CapabilityDimension) -> List[TestResult]:
        """运行指定维度的所有测试"""
        if dimension not in self.test_suites:
            raise ValueError(f"No test cases found for dimension: {dimension}")

        results = []
        for test_case in self.test_suites[dimension]:
            result = self.run_test(test_case)
            results.append(result)

        return results

    def analyze_results(self, results: List[TestResult]) -> Dict[str, Any]:
        """
        分析测试结果

        Args:
            results: 测试结果列表

        Returns:
            分析结果字典
        """
        scores = [r.score for r in results]
        latencies = [r.latency_ms for r in results]

        analysis = {
            "total_tests": len(results),
            "average_score": sum(scores) / len(scores) if scores else 0,
            "min_score": min(scores) if scores else 0,
            "max_score": max(scores) if scores else 0,
            "passed_tests": sum(1 for s in scores if s >= 0.8),
            "failed_tests": sum(1 for s in scores if s < 0.6),
            "average_latency_ms": sum(latencies) / len(latencies) if latencies else 0,
            "difficulty_distribution": {}
        }

        # Aggregate by difficulty level
        for difficulty in ["easy", "medium", "hard", "extreme"]:
            diff_tests = [r for r in results if r.test_case.difficulty_level == difficulty]
            if diff_tests:
                diff_scores = [r.score for r in diff_tests]
                analysis["difficulty_distribution"][difficulty] = {
                    "count": len(diff_tests),
                    "average_score": sum(diff_scores) / len(diff_tests)
                }

        return analysis
```
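
For orientation, here is a minimal usage sketch: it registers one test case with a keyword-based scoring function and runs it against a stub model API. The stub and the keyword scorer are illustrative assumptions, not part of the framework above:

```python
# Minimal usage sketch; the stub API and keyword scorer are illustrative
# assumptions, not part of the framework above.
def stub_api(prompt: str) -> str:
    return "Paris is the capital of France."

def keyword_score(output, expected):
    # Score = fraction of expected keywords found in the output
    hits = sum(1 for kw in expected if kw in output)
    return hits / len(expected)

case = TestCase(
    name="capital_fact",
    description="Simple factual recall",
    dimension=CapabilityDimension.KNOWLEDGE,
    input_data="What is the capital of France?",
    expected_output=["Paris", "France"],
    evaluation_criteria=keyword_score,
    difficulty_level="easy",
)

tester = LLMBoundaryTester(stub_api)
tester.add_test_case(case)
result = tester.run_test(case)
print(f"score={result.score:.2f}, latency={result.latency_ms:.1f}ms")
```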

Reasoning Capability Verification

Reasoning is among an LLM's most central capabilities, and also among the most prone to boundary failures. The following module targets reasoning specifically (test prompts and substring checks are deliberately kept in Chinese, since the evaluation matches Chinese keywords):

```python
class ReasoningTestSuite:
    """推理能力测试套件"""

    @staticmethod
    def create_deductive_tests() -> List[TestCase]:
        """创建演绎推理测试用例"""
        tests = []

        # A simple syllogism
        tests.append(TestCase(
            name="simple_syllogism",
            description="简单的三段论推理",
            dimension=CapabilityDimension.REASONING,
            input_data="所有人类都会死。苏格拉底是人类。因此,苏格拉底会死。请判断这个结论是否正确,并说明理由。",
            expected_output="结论正确。这是一个有效的三段论:大前提'所有人类都会死'为真,小前提'苏格拉底是人类'为真,所以结论'苏格拉底会死'必然为真。",
            evaluation_criteria=lambda output, expected: float(
                # accept if the answer says "正确" ("correct") and names the
                # syllogism ("三段论") or calls it valid ("有效")
                "正确" in output and ("三段论" in output or "有效" in output)
            ),
            difficulty_level="easy"
        ))

        # A longer chain of logical inference
        tests.append(TestCase(
            name="complex_logic",
            description="复杂的逻辑推理链",
            dimension=CapabilityDimension.REASONING,
            input_data="""已知以下条件:
1. 如果下雨,那么地面是湿的。
2. 如果地面是湿的,那么蚂蚁会躲进洞里。
3. 蚂蚁没有躲进洞里。

请通过逻辑推理,得出天气情况,并说明推理过程。""",
            expected_output="蚂蚁没有躲进洞里,说明地面不是湿的。如果地面不是湿的,根据条件1的逆否命题,可以推出没有下雨。因此,天气情况是没有下雨。",
            evaluation_criteria=lambda output, expected: float(
                # accept any phrasing of "it did not rain" or "sunny"
                "没有下雨" in output or "没下雨" in output or
                "没雨" in output or "晴天" in output
            ),
            difficulty_level="medium"
        ))

        return tests

    @staticmethod
    def create_mathematical_tests() -> List[TestCase]:
        """创建数学推理测试用例"""
        tests = []

        # A multi-step algebra word problem
        tests.append(TestCase(
            name="multi_step_algebra",
            description="多步代数推理",
            dimension=CapabilityDimension.REASONING,
            input_data="""一个长方形的周长是60厘米,长比宽多6厘米。请逐步计算这个长方形的长和宽。

要求:
1. 设未知数
2. 建立方程
3. 解方程
4. 验证答案""",
            expected_output=None,  # scored by the evaluator below
            evaluation_criteria=lambda output, expected: ReasoningTestSuite._evaluate_math_solution(output, length=18, width=12),
            difficulty_level="medium"
        ))

        # A geometry proof
        tests.append(TestCase(
            name="geometry_proof",
            description="几何证明推理",
            dimension=CapabilityDimension.REASONING,
            input_data="""证明:三角形的内角和等于180度。

请提供完整的证明过程,要求逻辑严密,每一步都说明依据的公理或定理。""",
            expected_output=None,
            evaluation_criteria=lambda output, expected: ReasoningTestSuite._evaluate_geometry_proof(output),
            difficulty_level="hard"
        ))

        return tests

    @staticmethod
    def _evaluate_math_solution(output: str, length: int, width: int) -> float:
        """评估数学解题过程和结果"""
        score = 0.0

        # Does the output contain the correct numeric answers?
        if str(length) in output and str(width) in output:
            score += 0.3

        # Were variables introduced? ("设"/"假设" = "let"/"suppose")
        if "设" in output or "假设" in output or "let" in output.lower():
            score += 0.2

        # Is an equation set up?
        if "=" in output or "方程" in output:
            score += 0.2

        # Is there a verification step?
        if "验证" in output or "check" in output.lower():
            score += 0.3

        return min(score, 1.0)

    @staticmethod
    def _evaluate_geometry_proof(output: str) -> float:
        """评估几何证明的完整性"""
        score = 0.0

        # Check for the key ingredients of the standard proof
        if "辅助线" in output or "平行线" in output:  # auxiliary / parallel line
            score += 0.2

        if "同位角" in output or "内错角" in output:  # corresponding / alternate angles
            score += 0.2

        if "平角" in output or "180" in output:  # straight angle / 180 degrees
            score += 0.2

        if "公理" in output or "定理" in output:  # cites an axiom or theorem
            score += 0.2

        if "证毕" in output or "证明完毕" in output:  # "QED"
            score += 0.2

        return min(score, 1.0)
```

Knowledge Boundary Verification

Verifying what the LLM actually knows and where that knowledge ends:

```python
class KnowledgeTestSuite:
    """知识能力测试套件"""

    @staticmethod
    def create_knowledge_cutoff_tests() -> List[TestCase]:
        """创建知识截止时间测试"""
        tests = []

        # Knowledge of events beyond the training data
        tests.append(TestCase(
            name="future_events",
            description="测试对未来事件的认知",
            dimension=CapabilityDimension.KNOWLEDGE,
            input_data="请描述2024年美国总统大选的结果。",
            expected_output=None,
            evaluation_criteria=lambda output, expected: (
                # for a model whose cutoff precedes the event, reward an honest
                # refusal ("我不知道"/"抱歉"/"无法" = "I don't know"/"sorry"/"cannot")
                1.0 if "我不知道" in output or "抱歉" in output or "无法" in output
                else 0.0
            ),
            difficulty_level="medium"
        ))

        # Version-specific knowledge
        tests.append(TestCase(
            name="version_specific_knowledge",
            description="测试版本特定的知识",
            dimension=CapabilityDimension.KNOWLEDGE,
            input_data="Python 3.13的新特性有哪些?请列出至少5个。",
            expected_output=None,
            evaluation_criteria=lambda output, expected: KnowledgeTestSuite._evaluate_version_specific_knowledge(output, "3.13"),
            difficulty_level="hard"
        ))

        return tests

    @staticmethod
    def create_domain_knowledge_tests() -> List[TestCase]:
        """创建领域知识测试"""
        tests = []

        # Medicine
        tests.append(TestCase(
            name="medical_knowledge",
            description="医疗知识准确性测试",
            dimension=CapabilityDimension.KNOWLEDGE,
            input_data="请解释COVID-19疫苗的作用机制,并说明mRNA疫苗和灭活疫苗的区别。",
            expected_output=None,
            evaluation_criteria=lambda output, expected: KnowledgeTestSuite._evaluate_medical_knowledge(output),
            difficulty_level="hard"
        ))

        # Programming
        tests.append(TestCase(
            name="programming_knowledge",
            description="编程知识深度测试",
            dimension=CapabilityDimension.KNOWLEDGE,
            input_data="""请用Rust语言实现一个线程安全的哈希表。

要求:
1. 使用标准库中的并发原语
2. 处理冲突
3. 提供基本的CRUD操作
4. 说明你的设计选择""",
            expected_output=None,
            evaluation_criteria=lambda output, expected: KnowledgeTestSuite._evaluate_programming_knowledge(output, "Rust"),
            difficulty_level="hard"
        ))

        return tests

    @staticmethod
    def _evaluate_version_specific_knowledge(output: str, version: str) -> float:
        """评估版本特定知识的准确性"""
        # 这里应该查询实际的版本信息进行对比
        # 简化版本:检查输出是否合理
        if "特性" in output or "新" in output:
            return 0.8
        elif "不知道" in output or "不了解" in output:
            return 0.2  # 诚实的不知道应该给高分,但这里简化处理
        return 0.5

    @staticmethod
    def _evaluate_medical_knowledge(output: str) -> float:
        """评估医疗知识的准确性"""
        score = 0.0

        # Check for the key concepts
        if "mRNA" in output and "抗原" in output:
            score += 0.3

        if "蛋白质" in output or "免疫" in output:
            score += 0.3

        if "疫苗" in output:
            score += 0.2

        if "安全" in output or "副作用" in output:
            score += 0.2

        return min(score, 1.0)

    @staticmethod
    def _evaluate_programming_knowledge(output: str, language: str) -> float:
        """评估编程知识"""
        score = 0.0

        # Check for language-specific markers
        if language.lower() == "rust":
            if "Arc" in output or "Mutex" in output or "RwLock" in output:
                score += 0.4

            if "HashMap" in output:
                score += 0.3

            if "pub" in output or "fn" in output:
                score += 0.3

        return min(score, 1.0)
```

Hallucination Detection

Detecting and measuring LLM hallucinations:

```python
import re

class HallucinationDetector:
    """Hallucination detector"""

    @staticmethod
    def create_hallucination_tests() -> List[TestCase]:
        """创建幻觉检测测试用例"""
        tests = []

        # Fabricated citations
        tests.append(TestCase(
            name="fake_citation",
            description="虚构文献引用测试",
            dimension=CapabilityDimension.KNOWLEDGE,
            input_data="请提供关于AI模型幻觉现象的5篇学术论文引用,包括作者、标题、年份。",
            expected_output=None,
            evaluation_criteria=lambda output, expected: HallucinationDetector._check_citations(output),
            difficulty_level="medium"
        ))

        # Fabricated facts
        tests.append(TestCase(
            name="fake_fact",
            description="虚构事实测试",
            dimension=CapabilityDimension.KNOWLEDGE,
            input_data="请告诉我2025年1月1日发生的重大新闻事件。",
            expected_output=None,
            evaluation_criteria=lambda output, expected: (
                # reward an honest refusal rather than an invented news story
                1.0 if "我不知道" in output or "抱歉" in output or "无法" in output
                else 0.0
            ),
            difficulty_level="easy"
        ))

        return tests

    @staticmethod
    def _check_citations(output: str) -> float:
        """检查引用的真实性"""
        # 在实际应用中,应该通过学术数据库验证引用的真实性
        # 这里简化处理
        import re

        # 尝试提取引用
        citation_pattern = r'\d{4}'
        citations = re.findall(citation_pattern, output)

        if not citations:
            return 0.0

        # Rough scoring: does the output look like a plausible citation list?
        score = 0.0
        if len(citations) >= 5:
            score += 0.4

        if "author" in output.lower() or "作者" in output:
            score += 0.3

        if "title" in output.lower() or "标题" in output:
            score += 0.3

        return min(score, 1.0)
```
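
As the comment in `_check_citations` notes, format checks alone cannot catch fabricated references. One hedged sketch of real verification, assuming network access and using Crossref's public `/works` endpoint; the title-overlap matching heuristic is an illustrative assumption:

```python
# A sketch of citation verification against Crossref's public REST API.
# Assumes network access; the matching heuristic is illustrative only.
import requests

def verify_citation_title(title: str, timeout_s: float = 10.0) -> bool:
    """Return True if Crossref finds a close bibliographic match for the title."""
    resp = requests.get(
        "https://api.crossref.org/works",
        params={"query.bibliographic": title, "rows": 1},
        timeout=timeout_s,
    )
    resp.raise_for_status()
    items = resp.json().get("message", {}).get("items", [])
    if not items:
        return False
    found = (items[0].get("title") or [""])[0].lower()
    # Crude match: require substantial token overlap between the two titles
    want = set(title.lower().split())
    got = set(found.split())
    return len(want & got) >= max(1, len(want) // 2)
```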

Integration Example

A complete example of the test-execution flow:

```python
def run_comprehensive_evaluation():
    """运行完整的LLM边界评估"""
    # 假设有一个模型API函数
    def mock_model_api(prompt: str) -> str:
        # 这里应该是实际的模型API调用
        # 返回模拟输出
        return "这是模拟输出"

    # Initialize the tester
    tester = LLMBoundaryTester(mock_model_api)

    # Register the test cases
    for test in ReasoningTestSuite.create_deductive_tests():
        tester.add_test_case(test)

    for test in ReasoningTestSuite.create_mathematical_tests():
        tester.add_test_case(test)

    for test in KnowledgeTestSuite.create_knowledge_cutoff_tests():
        tester.add_test_case(test)

    # Run the reasoning tests
    reasoning_results = tester.run_dimension_tests(CapabilityDimension.REASONING)
    reasoning_analysis = tester.analyze_results(reasoning_results)

    # Run the knowledge tests
    knowledge_results = tester.run_dimension_tests(CapabilityDimension.KNOWLEDGE)
    knowledge_analysis = tester.analyze_results(knowledge_results)

    # Print the report
    print("=== Reasoning capability ===")
    print(f"Average score: {reasoning_analysis['average_score']:.2f}")
    print(f"Passed: {reasoning_analysis['passed_tests']}/{reasoning_analysis['total_tests']}")
    print(f"Mean latency: {reasoning_analysis['average_latency_ms']:.2f}ms")

    print("\n=== Knowledge capability ===")
    print(f"Average score: {knowledge_analysis['average_score']:.2f}")
    print(f"Passed: {knowledge_analysis['passed_tests']}/{knowledge_analysis['total_tests']}")
    print(f"Mean latency: {knowledge_analysis['average_latency_ms']:.2f}ms")

if __name__ == "__main__":
    run_comprehensive_evaluation()
```

Key Configuration Settings

How Temperature Affects the Boundary

```python
import numpy as np

def test_temperature_impact():
    """Measure how the temperature setting shifts the capability boundary"""
    temperatures = [0.0, 0.3, 0.7, 1.0]
    results = {}

    for temp in temperatures:
        # Bind `temp` as a default argument so each closure keeps its own value
        def model_with_temp(prompt: str, temperature: float = temp) -> str:
            # Pass `temperature` through to the real model call here
            return ""  # placeholder

        tester = LLMBoundaryTester(model_with_temp)

        # Register the same test cases for every temperature
        for test in ReasoningTestSuite.create_deductive_tests():
            tester.add_test_case(test)

        reasoning_results = tester.run_dimension_tests(CapabilityDimension.REASONING)
        analysis = tester.analyze_results(reasoning_results)

        results[temp] = {
            "average_score": analysis["average_score"],
            "variance": np.var([r.score for r in reasoning_results]),
            "latency": analysis["average_latency_ms"]
        }

    return results
```
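
To make `model_with_temp` concrete, here is one hedged way to wire the temperature through, assuming the OpenAI Python SDK (v1+) with an `OPENAI_API_KEY` in the environment; the model name is a placeholder, not part of the framework above:

```python
# A sketch only; assumes the OpenAI Python SDK (>=1.0) and an API key in the
# environment. The model name is a placeholder, not a recommendation.
from openai import OpenAI

client = OpenAI()

def make_model_api(temperature: float):
    """Return a prompt -> output function bound to one temperature setting."""
    def model_api(prompt: str) -> str:
        resp = client.chat.completions.create(
            model="gpt-4o-mini",  # placeholder model name
            messages=[{"role": "user", "content": prompt}],
            temperature=temperature,
        )
        return resp.choices[0].message.content or ""
    return model_api

# Usage: tester = LLMBoundaryTester(make_model_api(0.3))
```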

Context-Length Testing

```python
def test_context_length_impact(model_api: Callable[[str], str]):
    """Measure how performance changes as the context grows"""
    context_lengths = [1024, 4096, 8192, 16384, 32768]
    results = {}

    tester = LLMBoundaryTester(model_api)

    for length in context_lengths:
        # Build a test case that needs roughly `length` tokens of context
        test_case = TestCase(
            name=f"context_length_{length}",
            description=f"Handle roughly {length} tokens of context",
            dimension=CapabilityDimension.MEMORY,
            input_data="A long stretch of filler text. " * (length // 6),  # simulated long input
            expected_output=None,
            evaluation_criteria=lambda output, expected: 1.0,  # simplified; see the needle sketch below
            difficulty_level="hard"
        )

        result = tester.run_test(test_case)
        results[length] = {
            "score": result.score,
            "latency_ms": result.latency_ms,
        }

    return results
```
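
The `evaluation_criteria` above is a stub. A common way to make it meaningful is a needle-in-a-haystack probe: plant one fact in the filler and score whether the model retrieves it. A hedged sketch, where the needle text, filler, and length heuristic are all illustrative assumptions:

```python
# A sketch of a more meaningful criterion for long-context tests: plant a
# known fact (the "needle") in filler text and check the model can recall it.
# The needle, filler, and length heuristic are all illustrative assumptions.
import random

def build_needle_test(length_tokens: int) -> TestCase:
    needle = "The secret code is 7A42. "
    filler = "This sentence is filler about nothing in particular. "
    n_repeats = max(1, length_tokens // len(filler.split()))  # rough token count
    chunks = [filler] * n_repeats
    chunks.insert(random.randrange(len(chunks) + 1), needle)  # random position
    prompt = "".join(chunks) + "\nWhat is the secret code mentioned above?"
    return TestCase(
        name=f"needle_{length_tokens}",
        description=f"Recall a planted fact in ~{length_tokens} tokens of context",
        dimension=CapabilityDimension.MEMORY,
        input_data=prompt,
        expected_output="7A42",
        evaluation_criteria=lambda output, expected: float(expected in output),
        difficulty_level="hard",
    )
```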

Implementation Recommendations

  1. Layered testing: start easy and move to hard, probing the capability boundary step by step
  2. Automation plus human review: automated tests for fast screening, human evaluation for deeper analysis
  3. Continuous monitoring: re-run the suite on every model version to track capability drift (a regression-flagging sketch follows this list)
  4. Domain customization: design domain-specific test cases for the target application scenario
  5. Baseline comparison: compare against human experts or other baselines to set reasonable expectations
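
For point 3, a minimal regression-flagging sketch, assuming the dictionaries produced by `analyze_results` for two model versions; the 0.05 tolerance is an illustrative assumption:

```python
from typing import Any, Dict, List

# Minimal sketch: compare two analyze_results() outputs and flag regressions.
# The tolerance value is an illustrative assumption.
def flag_regressions(baseline: Dict[str, Any], candidate: Dict[str, Any],
                     tolerance: float = 0.05) -> List[str]:
    warnings = []
    drop = baseline["average_score"] - candidate["average_score"]
    if drop > tolerance:
        warnings.append(f"average_score dropped by {drop:.2f}")
    if candidate["failed_tests"] > baseline["failed_tests"]:
        warnings.append(
            f"failed_tests rose from {baseline['failed_tests']} "
            f"to {candidate['failed_tests']}"
        )
    return warnings
```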
