Function resolveAddressesDwarf

Prototype

pub fn resolveAddressesDwarf(
    cov: *Coverage,
    gpa: Allocator,
    sorted_pc_addrs: []const u64,
    output: []SourceLocation,
    d: *Dwarf,
) ResolveAddressesDwarfError!void

Parameters

cov: *Coverage

gpa: Allocator

sorted_pc_addrs: []const u64
    Asserts the addresses are in ascending order.

output: []SourceLocation
    Asserts its length equals the length of sorted_pc_addrs.

d: *Dwarf

Possible Errors

EndOfBuffer Error
InvalidBuffer Error
InvalidDebugInfo ScanError
MissingDebugInfo ScanError
OutOfMemory Error
Overflow Error
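
Example

A minimal usage sketch, not part of the upstream documentation: the wrapper name resolveSorted and its surrounding setup are hypothetical, and it assumes the Dwarf debug info is already loaded and populateRanges has been called (per the assertion in the source below).

const std = @import("std");
const Coverage = std.debug.Coverage;
const SourceLocation = Coverage.SourceLocation;

/// Hypothetical helper: maps already-sorted program counters to source locations.
fn resolveSorted(
    gpa: std.mem.Allocator,
    cov: *Coverage,
    dwarf: *std.debug.Dwarf,
    sorted_pcs: []const u64,
) ![]SourceLocation {
    // The output slice must have exactly the same length as the input addresses.
    const out = try gpa.alloc(SourceLocation, sorted_pcs.len);
    errdefer gpa.free(out);
    // Addresses that fall outside any known range come back as SourceLocation.invalid.
    try cov.resolveAddressesDwarf(gpa, sorted_pcs, out, dwarf);
    return out;
}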

Source

pub fn resolveAddressesDwarf(
    cov: *Coverage,
    gpa: Allocator,
    /// Asserts the addresses are in ascending order.
    sorted_pc_addrs: []const u64,
    /// Asserts its length equals length of `sorted_pc_addrs`.
    output: []SourceLocation,
    d: *Dwarf,
) ResolveAddressesDwarfError!void {
    assert(sorted_pc_addrs.len == output.len);
    assert(d.ranges.items.len != 0); // call `populateRanges` first.
    var range_i: usize = 0;
    var range: *std.debug.Dwarf.Range = &d.ranges.items[0];
    var line_table_i: usize = undefined;
    var prev_pc: u64 = 0;
    var prev_cu: ?*std.debug.Dwarf.CompileUnit = null;
    // Protects directories and files tables from other threads.
    cov.mutex.lock();
    defer cov.mutex.unlock();
    next_pc: for (sorted_pc_addrs, output) |pc, *out| {
        assert(pc >= prev_pc);
        prev_pc = pc;
        while (pc >= range.end) {
            range_i += 1;
            if (range_i >= d.ranges.items.len) {
                out.* = SourceLocation.invalid;
                continue :next_pc;
            }
            range = &d.ranges.items[range_i];
        }
        if (pc < range.start) {
            out.* = SourceLocation.invalid;
            continue :next_pc;
        }
        const cu = &d.compile_unit_list.items[range.compile_unit_index];
        if (cu != prev_cu) {
            prev_cu = cu;
            if (cu.src_loc_cache == null) {
                cov.mutex.unlock();
                defer cov.mutex.lock();
                d.populateSrcLocCache(gpa, cu) catch |err| switch (err) {
                    error.MissingDebugInfo, error.InvalidDebugInfo => {
                        out.* = SourceLocation.invalid;
                        continue :next_pc;
                    },
                    else => |e| return e,
                };
            }
            const slc = &cu.src_loc_cache.?;
            const table_addrs = slc.line_table.keys();
            line_table_i = std.sort.upperBound(u64, table_addrs, pc, struct {
                fn order(context: u64, item: u64) std.math.Order {
                    return std.math.order(context, item);
                }
            }.order);
        }
        const slc = &cu.src_loc_cache.?;
        const table_addrs = slc.line_table.keys();
        while (line_table_i < table_addrs.len and table_addrs[line_table_i] <= pc) line_table_i += 1;
        const entry = slc.line_table.values()[line_table_i - 1];
        const corrected_file_index = entry.file - @intFromBool(slc.version < 5);
        const file_entry = slc.files[corrected_file_index];
        const dir_path = slc.directories[file_entry.dir_index].path;
        try cov.string_bytes.ensureUnusedCapacity(gpa, dir_path.len + file_entry.path.len + 2);
        const dir_gop = try cov.directories.getOrPutContextAdapted(gpa, dir_path, String.SliceAdapter{
            .string_bytes = cov.string_bytes.items,
        }, String.MapContext{
            .string_bytes = cov.string_bytes.items,
        });
        if (!dir_gop.found_existing) dir_gop.key_ptr.* = addStringAssumeCapacity(cov, dir_path);
        const file_gop = try cov.files.getOrPutContextAdapted(gpa, File.SliceAdapter.Entry{
            .directory_index = @intCast(dir_gop.index),
            .basename = file_entry.path,
        }, File.SliceAdapter{
            .string_bytes = cov.string_bytes.items,
        }, File.MapContext{
            .string_bytes = cov.string_bytes.items,
        });
        if (!file_gop.found_existing) file_gop.key_ptr.* = .{
            .directory_index = @intCast(dir_gop.index),
            .basename = addStringAssumeCapacity(cov, file_entry.path),
        };
        out.* = .{
            .file = @enumFromInt(file_gop.index),
            .line = entry.line,
            .column = entry.column,
        };
    }
}