""" OffloadedTensor 统一测试套件 本文件整合了 OffloadedTensor 的所有测试,包括: 1. 基础功能验证 2. Chunked GEMM 测试 3. 同步分析 核心组件: - OffloadedTensor: 虚拟 GPU Tensor,支持透明 CPU/GPU 数据移动 - OffloadManager: LRU 缓存管理,支持同步/异步传输 - ChunkedOffloadLinear: 沿着 seqlen 维度分块的 Linear 层 """ import torch import torch.nn as nn import weakref import threading import time from typing import Optional, Dict, List, Tuple, Any from dataclasses import dataclass # ============================================================ # Part 1: 核心组件 # ============================================================ class OffloadedTensor(torch.Tensor): """ 虚拟 GPU Tensor:假装在 GPU 上,实际可能在 CPU 所有计算操作通过 __torch_dispatch__ 拦截, 在计算前自动加载数据到 GPU。 """ @staticmethod def __new__(cls, real_tensor: torch.Tensor, manager: 'OffloadManager', tensor_id: int): device = torch.device("cuda", torch.cuda.current_device()) ret = torch.Tensor._make_wrapper_subclass( cls, real_tensor.size(), strides=real_tensor.stride(), dtype=real_tensor.dtype, device=device, requires_grad=real_tensor.requires_grad ) ret._real_tensor = real_tensor ret._manager = weakref.ref(manager) ret._tensor_id = tensor_id return ret def __init__(self, real_tensor: torch.Tensor, manager: 'OffloadManager', tensor_id: int): self._real_tensor = real_tensor self._manager = weakref.ref(manager) self._tensor_id = tensor_id @property def device(self) -> torch.device: """永远返回 CUDA device,欺骗 PyTorch 的检查""" return torch.device("cuda", torch.cuda.current_device()) def to(self, *args, **kwargs): """拦截 .to() 调用""" device = None if args and isinstance(args[0], torch.device): device = args[0] elif 'device' in kwargs: device = kwargs['device'] if device and device.type == "cuda": return self return super().to(*args, **kwargs) def __torch_dispatch__(self, func, types, args=(), kwargs=None): """拦截所有 PyTorch 操作,自动加载数据""" kwargs = kwargs or {} manager = self._manager() if manager: manager.stats['dispatch_count'] += 1 # 特殊处理:detach 返回 self func_name = getattr(func, 'name', '') if isinstance(func_name, str) and 'detach' in func_name.lower(): return self # 解包 OffloadedTensor 为真实 tensor def unwrap(t): if isinstance(t, OffloadedTensor): mgr = t._manager() if mgr: return mgr.get_gpu_tensor(t._real_tensor, t._tensor_id) return t._real_tensor.cuda() return t new_args = torch.utils._pytree.tree_map(unwrap, args) new_kwargs = torch.utils._pytree.tree_map(unwrap, kwargs) result = func(*new_args, **new_kwargs) return result class OffloadManager: """ 管理 tensor 的卸载和预取 特性: - LRU 缓存管理 GPU 上的张量 - 支持同步/异步传输模式 - 完整的性能统计 """ def __init__( self, device: str = "cuda", offload_device: str = "cpu", max_gpu_tensors: int = 2, non_blocking: bool = False, ): self.device = torch.device(device) self.offload_device = torch.device(offload_device) self._gpu_pool: Dict[int, torch.Tensor] = {} self._cpu_storage: Dict[int, torch.Tensor] = {} self._lock = threading.Lock() self._tensor_id_counter = 0 self._max_gpu_tensors = max_gpu_tensors self._access_order: List[int] = [] self.non_blocking = non_blocking # 统计信息 self.stats = { 'load_count': 0, 'evict_count': 0, 'dispatch_count': 0, 'transfer_times_ms': [], } def _next_id(self) -> int: tid = self._tensor_id_counter self._tensor_id_counter += 1 return tid def wrap(self, tensor: torch.Tensor) -> OffloadedTensor: """包装 tensor 为虚拟 GPU tensor""" if isinstance(tensor, OffloadedTensor): return tensor tensor_id = self._next_id() cpu_tensor = tensor.detach().to(self.offload_device) self._cpu_storage[tensor_id] = cpu_tensor return OffloadedTensor(cpu_tensor, self, tensor_id) def get_gpu_tensor(self, real_tensor: torch.Tensor, tensor_id: int) -> torch.Tensor: """获取 GPU 上的数据(LRU 缓存)""" with self._lock: self.stats['load_count'] += 1 if tensor_id in self._gpu_pool: # 已在 GPU 上,更新 LRU if tensor_id in self._access_order: self._access_order.remove(tensor_id) self._access_order.append(tensor_id) return self._gpu_pool[tensor_id] # LRU 驱逐 while len(self._gpu_pool) >= self._max_gpu_tensors: if self._access_order: evict_id = self._access_order.pop(0) if evict_id in self._gpu_pool: del self._gpu_pool[evict_id] self.stats['evict_count'] += 1 else: break # 加载到 GPU cpu_tensor = self._cpu_storage.get(tensor_id, real_tensor) gpu_tensor = cpu_tensor.to(self.device, non_blocking=self.non_blocking) self._gpu_pool[tensor_id] = gpu_tensor self._access_order.append(tensor_id) return gpu_tensor def get_stats(self) -> Dict[str, Any]: """获取统计信息""" transfer_times = self.stats['transfer_times_ms'] return { 'load_count': self.stats['load_count'], 'evict_count': self.stats['evict_count'], 'dispatch_count': self.stats['dispatch_count'], 'gpu_pool_size': len(self._gpu_pool), 'total_tensors': len(self._cpu_storage), 'total_transfer_time_ms': sum(transfer_times), 'avg_transfer_time_ms': sum(transfer_times) / len(transfer_times) if transfer_times else 0, 'transfer_times_ms': list(transfer_times), } class OffloadModuleWrapper(nn.Module): """包装 nn.Module,实现参数级别的卸载""" def __init__(self, module: nn.Module, manager: OffloadManager): super().__init__() self._original_module = module self._manager = manager self._wrap_parameters(module, "") def _wrap_parameters(self, module: nn.Module, prefix: str): """递归包装模块的所有参数""" for name, param in list(module.named_parameters(recurse=False)): param.requires_grad_(False) wrapped = self._manager.wrap(param.data) delattr(module, name) setattr(module, name, wrapped) for child_name, child in list(module.named_children()): self._wrap_parameters(child, prefix + child_name + ".") def forward(self, *args, **kwargs): return self._original_module(*args, **kwargs) # ============================================================ # Part 2: 高级模块 # ============================================================ class ChunkedOffloadLinear(nn.Module): """ 沿着 seqlen 维度分块的 Linear 层 将输入 [seqlen, in_features] 分成多个 chunks,每个 chunk 独立进行 GEMM 计算。 weight 使用 OffloadedTensor,按需加载到 GPU。 Args: in_features: 输入特征维度 out_features: 输出特征维度 chunk_size: 每个 chunk 的大小 max_gpu_tensors: GPU 上最多缓存的 tensor 数量 non_blocking: 是否使用异步传输 """ def __init__( self, in_features: int, out_features: int, chunk_size: int = 4096, max_gpu_tensors: int = 2, non_blocking: bool = False, bias: bool = False, ): super().__init__() self.in_features = in_features self.out_features = out_features self.chunk_size = chunk_size self.manager = OffloadManager( max_gpu_tensors=max_gpu_tensors, non_blocking=non_blocking ) weight_tensor = torch.empty(out_features, in_features, dtype=torch.float16) nn.init.xavier_uniform_(weight_tensor) weight_tensor.requires_grad_(False) self.weight = self.manager.wrap(weight_tensor) self.bias = None if bias: self.bias = nn.Parameter(torch.empty(out_features)) def forward(self, x: torch.Tensor) -> torch.Tensor: seqlen = x.shape[0] if seqlen <= self.chunk_size: return self._compute_chunk(x) outputs = [] for start_idx in range(0, seqlen, self.chunk_size): end_idx = min(start_idx + self.chunk_size, seqlen) chunk = x[start_idx:end_idx] chunk_output = self._compute_chunk(chunk) outputs.append(chunk_output) return torch.cat(outputs, dim=0) def _compute_chunk(self, chunk: torch.Tensor) -> torch.Tensor: return torch.nn.functional.linear(chunk, self.weight, self.bias) # ============================================================ # 辅助函数 # ============================================================ def calculate_memory( seqlen: int, in_features: int, out_features: int, dtype: torch.dtype = torch.float16, ) -> Dict[str, float]: """计算显存占用(MB)""" element_size = torch.finfo(dtype).bits / 8 activation = seqlen * in_features * element_size / (1024 ** 2) weight = in_features * out_features * element_size / (1024 ** 2) output = seqlen * out_features * element_size / (1024 ** 2) total = activation + weight + output peak = max(activation, output) + weight return { 'activation_mb': activation, 'weight_mb': weight, 'output_mb': output, 'total_mb': total, 'peak_mb': peak, } def run_benchmark( layer: nn.Module, input_tensor: torch.Tensor, num_runs: int = 3, ) -> Dict[str, float]: """运行性能测试""" torch.cuda.synchronize() # Warmup with torch.no_grad(): _ = layer(input_tensor) torch.cuda.synchronize() # Benchmark start_time = time.time() for _ in range(num_runs): with torch.no_grad(): output = layer(input_tensor) torch.cuda.synchronize() elapsed = time.time() - start_time avg_time = elapsed / num_runs total_elements = input_tensor.numel() + output.numel() throughput = total_elements / avg_time / 1e6 return { 'avg_time_ms': avg_time * 1000, 'throughput_meps': throughput, } # ============================================================ # Part 3: 测试套件 - 功能测试 # ============================================================ def test_1_basic_offloaded_tensor(): """测试 OffloadedTensor 基本功能""" print("\n=== Test 1: Basic OffloadedTensor ===") if not torch.cuda.is_available(): print("CUDA not available, skipping") return manager = OffloadManager(max_gpu_tensors=2) t1 = torch.randn(4, 4) t2 = torch.randn(4, 4) t3 = torch.randn(4, 4) w1 = manager.wrap(t1) w2 = manager.wrap(t2) w3 = manager.wrap(t3) print(f"✓ Created OffloadedTensors") print(f" w1.device: {w1.device}") print(f" w2.device: {w2.device}") assert w1.device.type == "cuda" print(f"✓ is_cuda check passed") result = w1 + w2 print(f"✓ Addition works: {result.shape}") stats = manager.get_stats() print(f"✓ Manager stats: {stats}") print("PASSED\n") def test_2_mlp_with_offload(): """测试 MLP 模型使用 OffloadedTensor""" print("\n=== Test 2: MLP with OffloadedTensor ===") if not torch.cuda.is_available(): print("CUDA not available, skipping") return class SimpleMLP(nn.Module): def __init__(self, hidden_size=128, intermediate_size=256): super().__init__() self.gate_up_proj = nn.Linear(hidden_size, 2 * intermediate_size, bias=False) self.down_proj = nn.Linear(intermediate_size, hidden_size, bias=False) def forward(self, x): gate, up = self.gate_up_proj(x).chunk(2, dim=-1) return self.down_proj(nn.functional.silu(gate) * up) hidden_size = 128 intermediate_size = 256 batch_size, seq_len = 2, 4 input_ids = torch.randn(batch_size, seq_len, hidden_size, device="cuda") model_original = SimpleMLP(hidden_size, intermediate_size) model_original.to("cuda") model_original.eval() with torch.no_grad(): expected = model_original(input_ids) state_dict = model_original.state_dict() model = SimpleMLP(hidden_size, intermediate_size) model.load_state_dict(state_dict) model.eval() offloaded_model, manager = apply_offload_to_model(model, max_gpu_tensors=2) offloaded_model.eval() with torch.no_grad(): output = offloaded_model(input_ids) print(f"✓ Forward pass completed: {output.shape}") stats = manager.get_stats() print(f"✓ Offload stats: {stats}") diff = (output - expected).abs().max().item() print(f"✓ Output correctness: max diff = {diff:.6f}") assert diff < 1e-5 print("PASSED\n") def apply_offload_to_model(model: nn.Module, max_gpu_tensors: int = 2): """应用卸载到模型的所有参数""" manager = OffloadManager(max_gpu_tensors=max_gpu_tensors) wrapper = OffloadModuleWrapper(model, manager) return wrapper, manager def test_3_lru_eviction(): """测试 LRU 驱逐机制""" print("\n=== Test 3: LRU Eviction ===") if not torch.cuda.is_available(): print("CUDA not available, skipping") return manager = OffloadManager(max_gpu_tensors=2) tensors = [torch.randn(2, 2) for _ in range(4)] wrapped = [manager.wrap(t) for t in tensors] print(f"✓ Created {len(wrapped)} OffloadedTensors") print(f" GPU pool capacity: {manager._max_gpu_tensors}") _ = wrapped[0] + wrapped[1] stats = manager.get_stats() print(f"✓ After accessing t1, t2: GPU pool = {stats['gpu_pool_size']}") _ = wrapped[2] + wrapped[2] stats = manager.get_stats() print(f"✓ After accessing t3: GPU pool = {stats['gpu_pool_size']}, evicted = {stats['evict_count']}") _ = wrapped[3] + wrapped[3] stats = manager.get_stats() print(f"✓ After accessing t4: GPU pool = {stats['gpu_pool_size']}, evicted = {stats['evict_count']}") assert stats['evict_count'] >= 1 print("PASSED\n") def test_4_correctness(): """测试输出正确性""" print("\n=== Test 4: Correctness Check ===") if not torch.cuda.is_available(): print("CUDA not available, skipping") return in_features = 512 out_features = 1024 seqlen = 4096 chunk_size = 1024 x = torch.randn(seqlen, in_features, device="cuda", dtype=torch.float16) # 创建标准层并保存权重 linear = nn.Linear(in_features, out_features, bias=False) linear.to("cuda", dtype=torch.float16) linear.eval() with torch.no_grad(): expected = linear(x) print(f"✓ Got expected output") # 创建 ChunkedOffloadLinear,使用相同的权重 chunked_layer = ChunkedOffloadLinear(in_features, out_features, chunk_size, max_gpu_tensors=2) # 复制权重到 chunked_layer with torch.no_grad(): weight_data = linear.weight.data.cpu() chunked_layer.manager._cpu_storage[0] = weight_data with torch.no_grad(): actual = chunked_layer(x) print(f"✓ Got actual output") diff = (actual - expected).abs().max().item() print(f"✓ Max difference: {diff:.6f}") assert diff < 1e-5 print("PASSED\n") # ============================================================ # Part 3: 测试套件 - 性能测试 # ============================================================ def test_5_memory_analysis(): """分析内存占用""" print("\n=== Test 5: Memory Analysis ===") in_features = 4096 out_features = 12244 chunk_size = 4096 seqlens = [4096, 16384, 65536, 131072] print(f"\nMemory Analysis (in={in_features}, out={out_features}, chunk={chunk_size}):") print(f"{'Seqlen':>10} | {'Activation':>12} | {'Weight':>12} | {'Output':>12} | {'Peak':>12} | {'Chunked':>12}") print("-" * 90) for seqlen in seqlens: full = calculate_memory(seqlen, in_features, out_features) chunked = calculate_memory(chunk_size, in_features, out_features) print(f"{seqlen:>10} | " f"{full['activation_mb']:>10.1f}MB | " f"{full['weight_mb']:>10.1f}MB | " f"{full['output_mb']:>10.1f}MB | " f"{full['peak_mb']:>10.1f}MB | " f"{chunked['peak_mb']:>10.1f}MB") print("\n✓ Chunked offload 显存占用恒定,与序列长度无关!") print("PASSED\n") def test_6_long_sequence(): """测试超长序列""" print("\n=== Test 6: Long Sequence (128K tokens) ===") if not torch.cuda.is_available(): print("CUDA not available, skipping") return in_features = 4096 out_features = 12244 seqlen = 128 * 1024 chunk_size = 4096 full = calculate_memory(seqlen, in_features, out_features) chunked = calculate_memory(chunk_size, in_features, out_features) print(f"Memory Comparison:") print(f" Full: {full['peak_mb']:.1f} MB") print(f" Chunked: {chunked['peak_mb']:.1f} MB") print(f" Savings: {(1 - chunked['peak_mb']/full['peak_mb'])*100:.1f}%") layer = ChunkedOffloadLinear(in_features, out_features, chunk_size, max_gpu_tensors=1) x = torch.randn(seqlen, in_features, device="cuda", dtype=torch.float16) with torch.no_grad(): start = time.time() output = layer(x) torch.cuda.synchronize() elapsed = (time.time() - start) * 1000 print(f"✓ Forward pass: {output.shape}") print(f" Time: {elapsed:.1f} ms") print(f" Throughput: {seqlen/elapsed/1e3:.1f}K tokens/sec") stats = layer.manager.get_stats() print(f"✓ Chunks processed: {seqlen // chunk_size}") print(f"✓ Load count: {stats['load_count']}") print("PASSED\n") def test_7_performance_comparison(): """性能对比测试""" print("\n=== Test 7: Performance Comparison ===") if not torch.cuda.is_available(): print("CUDA not available, skipping") return in_features = 4096 out_features = 12244 seqlen = 16384 chunk_size = 4096 x = torch.randn(seqlen, in_features, device="cuda", dtype=torch.float16) linear = nn.Linear(in_features, out_features, bias=False).cuda().half().eval() standard_stats = run_benchmark(linear, x, num_runs=5) print(f"✓ Standard Linear: {standard_stats['avg_time_ms']:.1f} ms") chunked_layer = ChunkedOffloadLinear(in_features, out_features, chunk_size, max_gpu_tensors=1) chunked_stats = run_benchmark(chunked_layer, x, num_runs=5) print(f"✓ ChunkedOffloadLinear: {chunked_stats['avg_time_ms']:.1f} ms") speedup = standard_stats['avg_time_ms'] / chunked_stats['avg_time_ms'] print(f"✓ Speedup: {speedup:.2f}x") print("PASSED\n") def test_8_transformers_layer(): """测试实际 transformers 权重""" print("\n=== Test 8: Transformers Layer Test ===") try: from transformers import AutoModelForCausalLM except ImportError: print("transformers not installed, skipping") return if not torch.cuda.is_available(): print("CUDA not available, skipping") return model_name = "Qwen/Qwen2.5-0.5B-Instruct" try: model = AutoModelForCausalLM.from_pretrained( model_name, torch_dtype=torch.float16, trust_remote_code=True, ) model.eval() model.to("cuda") except Exception as e: print(f"Failed to load model: {e}") return down_proj = model.model.layers[0].mlp.down_proj print(f"✓ Got layer: {down_proj.in_features} -> {down_proj.out_features}") batch_size, seq_len = 1, 4 test_input = torch.randn(batch_size, seq_len, down_proj.in_features, device="cuda", dtype=torch.float16) with torch.no_grad(): normal_output = down_proj(test_input) print(f"✓ Normal inference: {normal_output.shape}") import copy test_linear = nn.Linear(down_proj.in_features, down_proj.out_features, bias=False) test_linear.load_state_dict(copy.deepcopy(down_proj.state_dict())) test_linear.to("cuda", dtype=torch.float16) test_linear.eval() manager = OffloadManager(max_gpu_tensors=2) offloaded_layer = OffloadModuleWrapper(test_linear, manager) with torch.no_grad(): offload_output = offloaded_layer(test_input) print(f"✓ Offload inference: {offload_output.shape}") stats = manager.get_stats() print(f"✓ Stats: {stats}") diff = (offload_output - normal_output).abs().max().item() print(f"✓ Max diff: {diff:.6f}") assert diff < 1e-5 print("PASSED\n") # ============================================================ # Part 3: 测试套件 - 同步分析 # ============================================================ def test_9_sync_behavior_analysis(): """分析同步传输 vs 异步传输""" print("\n=== Test 9: Sync Behavior Analysis ===") if not torch.cuda.is_available(): print("CUDA not available, skipping") return in_features = 4096 out_features = 12244 seqlen = 16384 chunk_size = 4096 print(f"Config: in={in_features}, out={out_features}, seqlen={seqlen}, chunk={chunk_size}") print(f"Num chunks: {seqlen // chunk_size}") x = torch.randn(seqlen, in_features, device="cuda", dtype=torch.float16) # 同步版本 print(f"\n--- 同步传输 (non_blocking=False) ---") layer_sync = ChunkedOffloadLinear(in_features, out_features, chunk_size, non_blocking=False) with torch.no_grad(): start = time.time() _ = layer_sync(x) torch.cuda.synchronize() sync_time_ms = (time.time() - start) * 1000 stats_sync = layer_sync.manager.get_stats() print(f"总时间: {sync_time_ms:.2f} ms") print(f"传输时间: {stats_sync['total_transfer_time_ms']:.2f} ms") print(f"计算时间: {sync_time_ms - stats_sync['total_transfer_time_ms']:.2f} ms") print(f"加载次数: {stats_sync['load_count']}") # 异步版本 print(f"\n--- 异步传输 (non_blocking=True) ---") layer_async = ChunkedOffloadLinear(in_features, out_features, chunk_size, non_blocking=True) with torch.no_grad(): start = time.time() _ = layer_async(x) torch.cuda.synchronize() async_time_ms = (time.time() - start) * 1000 stats_async = layer_async.manager.get_stats() print(f"总时间: {async_time_ms:.2f} ms") print(f"传输时间: {stats_async['total_transfer_time_ms']:.2f} ms") print(f"计算时间: {async_time_ms - stats_async['total_transfer_time_ms']:.2f} ms") print(f"加载次数: {stats_async['load_count']}") # 对比 print(f"\n--- 对比 ---") print(f"总加速比: {sync_time_ms / async_time_ms:.2f}x") if stats_async['total_transfer_time_ms'] > 0: print(f"传输加速比: {stats_sync['total_transfer_time_ms'] / stats_async['total_transfer_time_ms']:.2f}x") print("\n关键发现:") print(f" 1. 同步传输阻塞 CPU 线程") print(f" 2. 异步传输可提高吞吐量") print(f" 3. 首次运行包含 JIT 编译开销") print("PASSED\n") def test_10_profiler_analysis(): """使用 Profiler 分析内核执行""" print("\n=== Test 10: Profiler Analysis ===") if not torch.cuda.is_available(): print("CUDA not available, skipping") return in_features = 4096 out_features = 12244 seqlen = 16384 chunk_size = 4096 layer = ChunkedOffloadLinear(in_features, out_features, chunk_size) x = torch.randn(seqlen, in_features, device="cuda", dtype=torch.float16) with torch.profiler.profile(activities=[torch.profiler.ProfilerActivity.CUDA]) as p: with torch.no_grad(): _ = layer(x) torch.cuda.synchronize() kernel_counts = {} for event in p.key_averages(): if event.device_type == torch.profiler.DeviceType.CUDA: name = event.key kernel_counts[name] = kernel_counts.get(name, 0) + 1 print(f"内核调用统计:") print(f"{'内核类型':<50} {'调用次数':<10}") print("-" * 60) for name, count in sorted(kernel_counts.items(), key=lambda x: -x[1])[:15]: name_short = name[:48] print(f"{name_short:<50} {count:<10}") memcpy_count = sum(count for name, count in kernel_counts.items() if 'memcpy' in name.lower()) print(f"\n分析:") print(f" - 总共 {len(kernel_counts)} 种不同的 CUDA 内核") print(f" - 总调用次数: {sum(kernel_counts.values())}") print(f" - 内存拷贝: {memcpy_count} 次") print("PASSED\n") # ============================================================ # 主测试入口 # ============================================================ def main(): """运行所有测试""" print("=" * 70) print("OffloadedTensor 统一测试套件") print("=" * 70) # 功能测试 print("\n" + "=" * 70) print("功能测试 (Tests 1-4)") print("=" * 70) test_1_basic_offloaded_tensor() test_2_mlp_with_offload() test_3_lru_eviction() test_4_correctness() # 性能测试 print("\n" + "=" * 70) print("性能测试 (Tests 5-8)") print("=" * 70) test_5_memory_analysis() test_6_long_sequence() test_7_performance_comparison() test_8_transformers_layer() # 同步分析 print("\n" + "=" * 70) print("同步分析 (Tests 9-10)") print("=" * 70) test_9_sync_behavior_analysis() test_10_profiler_analysis() print("=" * 70) print("所有测试完成!") print("=" * 70) if __name__ == "__main__": main()