Fix issue causing missing schema entries

a2x
2024-04-07 03:21:07 +10:00
parent 6d72c517ed
commit 7c9d594ca6
92 changed files with 29707 additions and 3399 deletions

View File

@@ -1,7 +1,5 @@
use std::collections::BTreeMap;
use std::ffi::CStr;
use std::mem;
use std::ops::Add;
use log::debug;
@@ -29,7 +27,7 @@ pub struct Class {
pub name: String,
pub module_name: String,
pub parent: Option<Box<Class>>,
pub metadata: Option<Vec<ClassMetadata>>,
pub metadata: Vec<ClassMetadata>,
pub fields: Vec<ClassField>,
}
@@ -37,14 +35,14 @@ pub struct Class {
pub struct ClassField {
pub name: String,
pub type_name: String,
pub offset: u32,
pub offset: i32,
}
#[derive(Clone, Deserialize, Serialize)]
pub struct Enum {
pub name: String,
pub alignment: u8,
pub size: u16,
pub size: i16,
pub members: Vec<EnumMember>,
}
@@ -56,7 +54,7 @@ pub struct EnumMember {
#[derive(Clone, Deserialize, Serialize)]
pub struct TypeScope {
pub name: String,
pub module_name: String,
pub classes: Vec<Class>,
pub enums: Vec<Enum>,
}
@@ -67,7 +65,12 @@ pub fn schemas(process: &mut IntoProcessInstanceArcBox<'_>) -> Result<SchemaMap>
let map: BTreeMap<_, _> = type_scopes
.into_iter()
.map(|type_scope| (type_scope.name, (type_scope.classes, type_scope.enums)))
.map(|type_scope| {
(
type_scope.module_name,
(type_scope.classes, type_scope.enums),
)
})
.collect();
Ok(map)
@@ -86,16 +89,15 @@ fn read_class_binding(
let name = binding.name.read_string(process)?.to_string();
let parent = binding.base_classes.non_null().and_then(|ptr| {
let base_class = ptr.read(process).ok()?;
let parent = binding
.base_classes
.non_null()
.and_then(|ptr| ptr.read(process).ok())
.and_then(|base_class| read_class_binding(process, base_class.prev).ok())
.map(Box::new);
read_class_binding(process, base_class.prev)
.ok()
.map(Box::new)
});
let metadata = read_class_binding_metadata(process, &binding).map(Some)?;
let fields = read_class_binding_fields(process, &binding)?;
let metadata = read_class_binding_metadata(process, &binding)?;
debug!(
"found class: {} at {:#X} (module name: {}) (parent name: {:?}) (metadata count: {}) (fields count: {})",
@@ -103,8 +105,8 @@ fn read_class_binding(
binding_ptr.to_umem(),
module_name,
parent.as_ref().map(|parent| parent.name.clone()),
metadata.as_ref().map(|metadata| metadata.len()).unwrap_or(0),
fields.len()
metadata.len(),
fields.len(),
);
Ok(Class {
@@ -121,36 +123,30 @@ fn read_class_binding_fields(
binding: &SchemaClassBinding,
) -> Result<Vec<ClassField>> {
if binding.fields.is_null() {
return Err(Error::Other("schema class fields is null"));
return Ok(Vec::new());
}
(0..binding.num_fields)
.map(|i| {
let field_ptr: Pointer64<SchemaClassFieldData> = binding
.fields
.address()
.add(i * mem::size_of::<SchemaClassFieldData>() as u16)
.into();
(0..binding.num_fields).try_fold(Vec::new(), |mut acc, i| {
let field = binding.fields.at(i as _).read(process)?;
let field = field_ptr.read(process)?;
if field.schema_type.is_null() {
return Ok(acc);
}
if field.type_.is_null() {
return Err(Error::Other("schema field type is null"));
}
let name = field.name.read_string(process)?.to_string();
let type_ = field.schema_type.read(process)?;
let name = field.name.read_string(process)?.to_string();
let type_ = field.type_.read(process)?;
// TODO: Parse this properly.
let type_name = type_.name.read_string(process)?.replace(" ", "");
// TODO: Parse this properly.
let type_name = type_.name.read_string(process)?.replace(" ", "");
acc.push(ClassField {
name,
type_name,
offset: field.offset,
});
Ok(ClassField {
name,
type_name,
offset: field.offset,
})
})
.collect()
Ok(acc)
})
}
fn read_class_binding_metadata(
@@ -158,43 +154,40 @@ fn read_class_binding_metadata(
binding: &SchemaClassBinding,
) -> Result<Vec<ClassMetadata>> {
if binding.static_metadata.is_null() {
return Err(Error::Other("schema class metadata is null"));
return Ok(Vec::new());
}
(0..binding.num_static_metadata)
.map(|i| {
let metadata_ptr =
Pointer64::<SchemaMetadataEntryData>::from(binding.static_metadata.offset(i as _));
(0..binding.num_static_metadata).try_fold(Vec::new(), |mut acc, i| {
let metadata = binding.static_metadata.at(i as _).read(process)?;
let metadata = metadata_ptr.read(process)?;
if metadata.network_value.is_null() {
return Ok(acc);
}
if metadata.network_value.is_null() {
return Err(Error::Other("schema class metadata network value is null"));
}
let name = metadata.name.read_string(process)?.to_string();
let network_value = metadata.network_value.read(process)?;
let name = metadata.name.read_string(process)?.to_string();
let network_value = metadata.network_value.read(process)?;
let metadata = match name.as_str() {
"MNetworkChangeCallback" => unsafe {
let name = network_value.u.name_ptr.read_string(process)?.to_string();
let metadata = match name.as_str() {
"MNetworkChangeCallback" => unsafe {
let name = network_value.u.name_ptr.read_string(process)?.to_string();
ClassMetadata::NetworkChangeCallback { name }
},
"MNetworkVarNames" => unsafe {
let var_value = network_value.u.var_value;
ClassMetadata::NetworkChangeCallback { name }
},
"MNetworkVarNames" => unsafe {
let var_value = network_value.u.var_value;
let name = var_value.name.read_string(process)?.to_string();
let type_name = var_value.type_name.read_string(process)?.replace(" ", "");
let name = var_value.name.read_string(process)?.to_string();
let type_name = var_value.type_name.read_string(process)?.replace(" ", "");
ClassMetadata::NetworkVarNames { name, type_name }
},
_ => ClassMetadata::Unknown { name },
};
ClassMetadata::NetworkVarNames { name, type_name }
},
_ => ClassMetadata::Unknown { name },
};
acc.push(metadata);
Ok(metadata)
})
.collect()
Ok(acc)
})
}
fn read_enum_binding(
@@ -217,7 +210,7 @@ fn read_enum_binding(
Ok(Enum {
name,
alignment: binding.alignment,
size: binding.size,
size: binding.num_enumerators,
members,
})
}
@@ -226,30 +219,28 @@ fn read_enum_binding_members(
process: &mut IntoProcessInstanceArcBox<'_>,
binding: &SchemaEnumBinding,
) -> Result<Vec<EnumMember>> {
(0..binding.size)
.map(|i| {
let enum_info_ptr: Pointer64<SchemaEnumeratorInfoData> = binding
.enum_info
.address()
.add(i * mem::size_of::<SchemaEnumeratorInfoData>() as u16)
.into();
if binding.enumerators.is_null() {
return Ok(Vec::new());
}
let enum_info = enum_info_ptr.read(process)?;
let name = enum_info.name.read_string(process)?.to_string();
(0..binding.num_enumerators).try_fold(Vec::new(), |mut acc, i| {
let enumerator = binding.enumerators.at(i as _).read(process)?;
let name = enumerator.name.read_string(process)?.to_string();
let value = {
let value = unsafe { enum_info.u.ulong } as i64;
let value = {
let value = unsafe { enumerator.u.ulong } as i64;
if value == i64::MAX {
-1
} else {
value
}
};
if value == i64::MAX {
-1
} else {
value
}
};
Ok(EnumMember { name, value })
})
.collect()
acc.push(EnumMember { name, value });
Ok(acc)
})
}
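The three readers above (class fields, static metadata, enum members) now share one shape: try_fold an index range into an accumulator, skip null entries, and only propagate real read errors, where the old .map(...).collect() turned a null fields pointer or field type into a hard Err. A minimal standalone sketch of that pattern, using a plain slice and hypothetical names in place of the memflow pointer reads:

fn collect_non_null(entries: &[Option<i32>]) -> Result<Vec<i32>, String> {
    (0..entries.len()).try_fold(Vec::new(), |mut acc, i| {
        // A null entry is skipped instead of aborting the whole class or enum.
        let Some(value) = entries[i] else {
            return Ok(acc);
        };
        acc.push(value);
        Ok(acc)
    })
}

fn main() {
    assert_eq!(collect_non_null(&[Some(1), None, Some(3)]), Ok(vec![1, 3]));
}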
fn read_schema_system(process: &mut IntoProcessInstanceArcBox<'_>) -> Result<SchemaSystem> {
@@ -287,7 +278,7 @@ fn read_type_scopes(
let type_scope_ptr = type_scopes.element(process, i as _)?;
let type_scope = type_scope_ptr.read(process)?;
let name = unsafe { CStr::from_ptr(type_scope.name.as_ptr()) }
let module_name = unsafe { CStr::from_ptr(type_scope.name.as_ptr()) }
.to_string_lossy()
.to_string();
@@ -307,14 +298,14 @@ fn read_type_scopes(
debug!(
"found type scope: {} at {:#X} (classes count: {}) (enums count: {})",
name,
module_name,
type_scope_ptr.to_umem(),
classes.len(),
enums.len()
);
Ok(TypeScope {
name,
module_name,
classes,
enums,
})

View File

@@ -17,7 +17,7 @@ impl CodeGen for InterfaceMap {
fmt.block(
&format!(
"public static class {}",
AsPascalCase(Self::sanitize_name(module_name))
AsPascalCase(Self::slugify(module_name))
),
false,
|fmt| {
@@ -52,10 +52,7 @@ impl CodeGen for InterfaceMap {
writeln!(fmt, "// Module: {}", module_name)?;
fmt.block(
&format!(
"namespace {}",
AsSnakeCase(Self::sanitize_name(module_name))
),
&format!("namespace {}", AsSnakeCase(Self::slugify(module_name))),
false,
|fmt| {
for iface in ifaces {
@@ -108,7 +105,7 @@ impl CodeGen for InterfaceMap {
writeln!(fmt, "// Module: {}", module_name)?;
fmt.block(
&format!("pub mod {}", AsSnakeCase(Self::sanitize_name(module_name))),
&format!("pub mod {}", AsSnakeCase(Self::slugify(module_name))),
false,
|fmt| {
for iface in ifaces {

View File

@@ -50,12 +50,13 @@ trait CodeGen {
fn to_rs(&self, results: &Results, indent_size: usize) -> Result<String>;
/// Replaces non-alphanumeric characters in a string with underscores.
#[inline]
fn sanitize_name(name: &str) -> String {
name.replace(|c: char| !c.is_alphanumeric(), "_")
fn slugify(input: &str) -> String {
input.replace(|c: char| !c.is_alphanumeric(), "_")
}
fn write_content<F>(&self, results: &Results, indent_size: usize, callback: F) -> Result<String>
fn write_content<F>(&self, results: &Results, indent_size: usize, f: F) -> Result<String>
where
F: FnOnce(&mut Formatter<'_>) -> Result<()>,
{
@@ -64,7 +65,7 @@ trait CodeGen {
results.write_banner(&mut fmt)?;
callback(&mut fmt)?;
f(&mut fmt)?;
Ok(buf)
}
@@ -148,6 +149,7 @@ impl Results {
out_dir: P,
indent_size: usize,
) -> Result<()> {
// TODO: Make this user-configurable.
const FILE_EXTS: &[&str] = &["cs", "hpp", "json", "rs"];
fs::create_dir_all(&out_dir)?;
@@ -162,8 +164,8 @@ impl Results {
self.dump_item(item, &out_dir, indent_size, FILE_EXTS, file_name)?;
}
self.dump_schemas(&out_dir, indent_size, FILE_EXTS)?;
self.dump_info(process, &out_dir)?;
self.dump_schemas(&out_dir, indent_size, FILE_EXTS)?;
Ok(())
}
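For reference, the renamed slugify helper keeps the documented behaviour (every non-alphanumeric character becomes an underscore); a standalone copy of the one-liner with a couple of example inputs:

fn slugify(input: &str) -> String {
    input.replace(|c: char| !c.is_alphanumeric(), "_")
}

fn main() {
    // Module names such as "client.dll" turn into valid identifiers for the
    // generated namespaces and mods.
    assert_eq!(slugify("client.dll"), "client_dll");
    assert_eq!(slugify("!GlobalTypes"), "_GlobalTypes");
}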

View File

@@ -16,7 +16,7 @@ impl CodeGen for OffsetMap {
fmt.block(
&format!(
"public static class {}",
AsPascalCase(Self::sanitize_name(module_name))
AsPascalCase(Self::slugify(module_name))
),
false,
|fmt| {
@@ -47,10 +47,7 @@ impl CodeGen for OffsetMap {
writeln!(fmt, "// Module: {}", module_name)?;
fmt.block(
&format!(
"namespace {}",
AsSnakeCase(Self::sanitize_name(module_name))
),
&format!("namespace {}", AsSnakeCase(Self::slugify(module_name))),
false,
|fmt| {
for (name, value) in offsets {
@@ -91,7 +88,7 @@ impl CodeGen for OffsetMap {
writeln!(fmt, "// Module: {}", module_name)?;
fmt.block(
&format!("pub mod {}", AsSnakeCase(Self::sanitize_name(module_name))),
&format!("pub mod {}", AsSnakeCase(Self::slugify(module_name))),
false,
|fmt| {
for (name, value) in offsets {

View File

@@ -27,7 +27,7 @@ impl CodeGen for SchemaMap {
fmt.block(
&format!(
"public static class {}",
AsPascalCase(Self::sanitize_name(module_name))
AsPascalCase(Self::slugify(module_name))
),
false,
|fmt| {
@@ -46,11 +46,13 @@ impl CodeGen for SchemaMap {
fmt.block(
&format!(
"public enum {} : {}",
Self::sanitize_name(&enum_.name),
Self::slugify(&enum_.name),
type_name
),
false,
|fmt| {
// TODO: Handle the case where multiple members share
// the same value.
let members = enum_
.members
.iter()
@@ -69,21 +71,16 @@ impl CodeGen for SchemaMap {
let parent_name = class
.parent
.as_ref()
.map(|parent| Self::sanitize_name(&parent.name))
.map(|parent| Self::slugify(&parent.name))
.unwrap_or_else(|| "None".to_string());
writeln!(fmt, "// Parent: {}", parent_name)?;
writeln!(fmt, "// Fields count: {}", class.fields.len())?;
if let Some(metadata) = &class.metadata {
write_metadata(fmt, metadata)?;
}
write_metadata(fmt, &class.metadata)?;
fmt.block(
&format!(
"public static class {}",
Self::sanitize_name(&class.name)
),
&format!("public static class {}", Self::slugify(&class.name)),
false,
|fmt| {
for field in &class.fields {
@@ -129,10 +126,7 @@ impl CodeGen for SchemaMap {
writeln!(fmt, "// Enums count: {}", enums.len())?;
fmt.block(
&format!(
"namespace {}",
AsSnakeCase(Self::sanitize_name(module_name))
),
&format!("namespace {}", AsSnakeCase(Self::slugify(module_name))),
false,
|fmt| {
for enum_ in enums {
@@ -150,11 +144,13 @@ impl CodeGen for SchemaMap {
fmt.block(
&format!(
"enum class {} : {}",
Self::sanitize_name(&enum_.name),
Self::slugify(&enum_.name),
type_name
),
true,
|fmt| {
// TODO: Handle the case where multiple members share
// the same value.
let members = enum_
.members
.iter()
@@ -173,18 +169,16 @@ impl CodeGen for SchemaMap {
let parent_name = class
.parent
.as_ref()
.map(|parent| Self::sanitize_name(&parent.name))
.map(|parent| Self::slugify(&parent.name))
.unwrap_or_else(|| "None".to_string());
writeln!(fmt, "// Parent: {}", parent_name)?;
writeln!(fmt, "// Fields count: {}", class.fields.len())?;
if let Some(metadata) = &class.metadata {
write_metadata(fmt, metadata)?;
}
write_metadata(fmt, &class.metadata)?;
fmt.block(
&format!("namespace {}", Self::sanitize_name(&class.name)),
&format!("namespace {}", Self::slugify(&class.name)),
false,
|fmt| {
for field in &class.fields {
@@ -228,8 +222,6 @@ impl CodeGen for SchemaMap {
let metadata: Vec<_> = class
.metadata
.as_ref()
.unwrap_or(&vec![])
.iter()
.map(|metadata| match metadata {
ClassMetadata::NetworkChangeCallback { name } => json!({
@@ -249,7 +241,7 @@ impl CodeGen for SchemaMap {
.collect();
(
Self::sanitize_name(&class.name),
Self::slugify(&class.name),
json!({
"parent": class.parent.as_ref().map(|parent| &parent.name),
"fields": fields,
@@ -277,7 +269,7 @@ impl CodeGen for SchemaMap {
};
(
Self::sanitize_name(&enum_.name),
Self::slugify(&enum_.name),
json!({
"alignment": enum_.alignment,
"type": type_name,
@@ -320,7 +312,7 @@ impl CodeGen for SchemaMap {
writeln!(fmt, "// Enums count: {}", enums.len())?;
fmt.block(
&format!("pub mod {}", AsSnakeCase(Self::sanitize_name(module_name))),
&format!("pub mod {}", AsSnakeCase(Self::slugify(module_name))),
false,
|fmt| {
for enum_ in enums {
@@ -339,7 +331,7 @@ impl CodeGen for SchemaMap {
&format!(
"#[repr({})]\npub enum {}",
type_name,
Self::sanitize_name(&enum_.name),
Self::slugify(&enum_.name),
),
false,
|fmt| {
@@ -363,18 +355,16 @@ impl CodeGen for SchemaMap {
let parent_name = class
.parent
.as_ref()
.map(|parent| Self::sanitize_name(&parent.name))
.map(|parent| Self::slugify(&parent.name))
.unwrap_or_else(|| "None".to_string());
writeln!(fmt, "// Parent: {}", parent_name)?;
writeln!(fmt, "// Fields count: {}", class.fields.len())?;
if let Some(metadata) = &class.metadata {
write_metadata(fmt, metadata)?;
}
write_metadata(fmt, &class.metadata)?;
fmt.block(
&format!("pub mod {}", Self::sanitize_name(&class.name)),
&format!("pub mod {}", Self::slugify(&class.name)),
false,
|fmt| {
for field in &class.fields {

View File

@@ -1,3 +1,3 @@
pub use input::*;
pub use input::KeyButton;
pub mod input;

View File

@@ -82,7 +82,7 @@ pub struct SchemaAtomicTTF {
#[derive(Pod)]
#[repr(C)]
pub struct SchemaBaseClassInfoData {
pub offset: u32, // 0x0000
pub offset: i32, // 0x0000
pad_0004: [u8; 0x4], // 0x0004
pub prev: Pointer64<SchemaClassInfoData>, // 0x0008
}
@@ -91,9 +91,9 @@ pub struct SchemaBaseClassInfoData {
#[repr(C)]
pub struct SchemaClassFieldData {
pub name: Pointer64<ReprCString>, // 0x0000
pub type_: Pointer64<SchemaType>, // 0x0008
pub offset: u32, // 0x0010
pub num_metadata: u32, // 0x0014
pub schema_type: Pointer64<SchemaType>, // 0x0008
pub offset: i32, // 0x0010
pub num_metadata: i32, // 0x0014
pub metadata: Pointer64<SchemaMetadataEntryData>, // 0x0018
}
@@ -101,41 +101,43 @@ pub struct SchemaClassFieldData {
#[derive(Pod)]
#[repr(C)]
pub struct SchemaClassInfoData {
pub base: Pointer64<SchemaClassInfoData>, // 0x0000
pub name: Pointer64<ReprCString>, // 0x0008
pub module_name: Pointer64<ReprCString>, // 0x0010
pub size: u32, // 0x0018
pub num_fields: u16, // 0x001C
pub num_static_fields: u16, // 0x001E
pub num_static_metadata: u16, // 0x0020
pub alignment: u8, // 0x0022
pub has_base_class: u8, // 0x0023
pub total_class_size: u16, // 0x0024
pub derived_class_size: u16, // 0x0026
pub fields: Pointer64<SchemaClassFieldData>, // 0x0028
pub static_fields: Pointer64<SchemaStaticFieldData>, // 0x0030
pub base_classes: Pointer64<SchemaBaseClassInfoData>, // 0x0038
pad_0040: [u8; 0x8], // 0x0040
pub static_metadata: Pointer64<SchemaMetadataEntryData>, // 0x0048
pub type_scope: Pointer64<SchemaSystemTypeScope>, // 0x0050
pub type_: Pointer64<SchemaType>, // 0x0058
pad_0060: [u8; 0x10], // 0x0060
pub base: Pointer64<SchemaClassInfoData>, // 0x0000
pub name: Pointer64<ReprCString>, // 0x0008
pub module_name: Pointer64<ReprCString>, // 0x0010
pub size: i32, // 0x0018
pub num_fields: i16, // 0x001C
pub num_static_fields: i16, // 0x001E
pub num_static_metadata: i16, // 0x0020
pub alignment: u8, // 0x0022
pub has_base_class: u8, // 0x0023
pub total_class_size: i16, // 0x0024
pub derived_class_size: i16, // 0x0026
pub fields: Pointer64<[SchemaClassFieldData]>, // 0x0028
pub static_fields: Pointer64<[SchemaStaticFieldData]>, // 0x0030
pub base_classes: Pointer64<SchemaBaseClassInfoData>, // 0x0038
pad_0040: [u8; 0x8], // 0x0040
pub static_metadata: Pointer64<[SchemaMetadataEntryData]>, // 0x0048
pub type_scope: Pointer64<SchemaSystemTypeScope>, // 0x0050
pub schema_type: Pointer64<SchemaType>, // 0x0058
pad_0060: [u8; 0x10], // 0x0060
}
#[rustfmt::skip]
#[derive(Pod)]
#[repr(C)]
pub struct SchemaEnumInfoData {
pub base: Pointer64<SchemaEnumInfoData>,
pub name: Pointer64<ReprCString>,
pub module_name: Pointer64<ReprCString>,
pub alignment: u8,
pad_0019: [u8; 0x3],
pub size: u16,
pub num_static_metadata: u16,
pub enum_info: Pointer64<SchemaEnumeratorInfoData>,
pub static_metadata: Pointer64<SchemaMetadataEntryData>,
pub type_scope: Pointer64<SchemaSystemTypeScope>,
pad_0038: [u8; 0x10],
pub base: Pointer64<SchemaEnumInfoData>, // 0x0000
pub name: Pointer64<ReprCString>, // 0x0008
pub module_name: Pointer64<ReprCString>, // 0x0010
pub size: u8, // 0x0018
pub alignment: u8, // 0x0019
pad_001a: u16, // 0x001A
pub num_enumerators: i16, // 0x001C
pub num_static_metadata: i16, // 0x001E
pub enumerators: Pointer64<[SchemaEnumeratorInfoData]>, // 0x0020
pub static_metadata: Pointer64<SchemaMetadataEntryData>, // 0x0028
pub type_scope: Pointer64<SchemaSystemTypeScope>, // 0x0030
pad_0038: [u8; 0x10], // 0x0038
}
#[repr(C)]

View File

@@ -1,8 +1,8 @@
pub use interface::*;
pub use utl_memory::*;
pub use utl_memory_pool::*;
pub use utl_ts_hash::*;
pub use utl_vector::*;
pub use interface::InterfaceReg;
pub use utl_memory::UtlMemory;
pub use utl_memory_pool::UtlMemoryPoolBase;
pub use utl_ts_hash::UtlTsHash;
pub use utl_vector::UtlVector;
pub mod interface;
pub mod utl_memory;

View File

@@ -1,30 +1,49 @@
use memflow::prelude::v1::*;
#[repr(u32)]
pub enum MemoryPoolGrowType {
None = 0,
Fast,
Slow,
RbTree,
}
#[derive(Pod)]
#[repr(C)]
pub struct Blob {
pub next: Pointer64<Blob>, // 0x0000
pub num_bytes: i32, // 0x0008
pub data: [u8; 1], // 0x000C
pad_000d: [u8; 3], // 0x000D
}
#[derive(Pod)]
#[repr(C)]
pub struct FreeList {
pub next: Pointer64<FreeList>, // 0x0000
}
/// Represents an optimized pool memory allocator.
#[repr(C)]
pub struct UtlMemoryPool {
pub block_size: i32, // 0x0000
pub blocks_per_blob: i32, // 0x0004
pub grow_mode: i32, // 0x0008
pub blocks_alloc: i32, // 0x000C
pub block_alloc_size: i32, // 0x0010
pub peak_alloc: i32, // 0x0014
pub struct UtlMemoryPoolBase {
pub block_size: i32, // 0x0000
pub blocks_per_blob: i32, // 0x0004
pub grow_mode: MemoryPoolGrowType, // 0x0008
pub blocks_alloc: i32, // 0x000C
pub peak_alloc: i32, // 0x0010
pub alignment: u16, // 0x0014
pub num_blobs: u16, // 0x0016
pub free_list_tail: Pointer64<Pointer64<FreeList>>, // 0x0018
pub free_list_head: Pointer64<FreeList>, // 0x0020
pad_0028: [u8; 0x44], // 0x0028
pub blob_head: Pointer64<Blob>, // 0x0070
pad_0078: [u8; 0x8], // 0x0078
}
impl UtlMemoryPool {
/// Returns the size of a block.
impl UtlMemoryPoolBase {
/// Returns the total size of the memory pool.
#[inline]
pub fn block_size(&self) -> i32 {
self.block_size
}
/// Returns the number of allocated blocks per blob.
#[inline]
pub fn count(&self) -> i32 {
self.blocks_per_blob
}
/// Returns the maximum number of allocated blocks.
#[inline]
pub fn peak_count(&self) -> i32 {
self.peak_alloc
pub fn size(&self) -> i32 {
(self.num_blobs as i32 * self.blocks_per_blob) * self.block_size
}
}
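A cut-down copy of the new size() with made-up field values, to show what it reports (blobs * blocks per blob * bytes per block):

// Only the fields size() reads; the numbers below are illustrative, not dumped.
struct Pool {
    block_size: i32,
    blocks_per_blob: i32,
    num_blobs: u16,
}

impl Pool {
    fn size(&self) -> i32 {
        (self.num_blobs as i32 * self.blocks_per_blob) * self.block_size
    }
}

fn main() {
    let pool = Pool { block_size: 64, blocks_per_blob: 256, num_blobs: 2 };
    assert_eq!(pool.size(), 32_768); // 2 blobs * 256 blocks * 64 bytes each
}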

View File

@@ -1,6 +1,6 @@
use memflow::prelude::v1::*;
use super::UtlMemoryPool;
use super::UtlMemoryPoolBase;
use crate::error::Result;
use crate::mem::IsNull;
@@ -17,79 +17,98 @@ unsafe impl<D: 'static> Pod for HashAllocatedBlob<D> {}
#[repr(C)]
pub struct HashBucket<D, K> {
pad_0000: [u8; 0x18], // 0x0000
pub first: Pointer64<HashFixedData<D, K>>, // 0x0018
pub first_uncommitted: Pointer64<HashFixedData<D, K>>, // 0x0020
pad_0000: [u8; 0x18], // 0x0000
pub first: Pointer64<HashFixedDataInternal<D, K>>, // 0x0018
pub first_uncommitted: Pointer64<HashFixedDataInternal<D, K>>, // 0x0020
}
#[repr(C)]
pub struct HashFixedData<D, K> {
pub ui_key: K, // 0x0000
pub next: Pointer64<HashFixedData<D, K>>, // 0x0008
pub data: D, // 0x0010
pub struct HashFixedDataInternal<D, K> {
pub ui_key: K, // 0x0000
pub next: Pointer64<HashFixedDataInternal<D, K>>, // 0x0008
pub data: D, // 0x0010
}
unsafe impl<D: 'static, K: 'static> Pod for HashFixedData<D, K> {}
unsafe impl<D: 'static, K: 'static> Pod for HashFixedDataInternal<D, K> {}
/// Represents a thread-safe hash table.
#[repr(C)]
pub struct UtlTsHash<D, const C: usize = 256, K = u64> {
pub entry_mem: UtlMemoryPool, // 0x0000
pad_0018: [u8; 0x8], // 0x0018
pub blobs: Pointer64<HashAllocatedBlob<D>>, // 0x0020
pad_0028: [u8; 0x58], // 0x0028
pub buckets: [HashBucket<D, K>; C], // 0x0080
pad_2880: [u8; 0x10], // 0x2880
pub entry_mem: UtlMemoryPoolBase, // 0x0000
pub buckets: [HashBucket<D, K>; C], // 0x0080
pub needs_commit: bool, // 0x2880
pad_2881: [u8; 0xF], // 0x2881
}
impl<D: Pod + IsNull, const C: usize, K: Pod> UtlTsHash<D, C, K> {
/// Returns the number of allocated blocks.
#[inline]
pub fn blocks_alloc(&self) -> i32 {
self.entry_mem.blocks_alloc
}
/// Returns the size of a block.
#[inline]
pub fn block_size(&self) -> i32 {
self.entry_mem.block_size
}
/// Returns the maximum number of allocated blocks.
#[inline]
pub fn peak_count(&self) -> i32 {
self.entry_mem.peak_alloc
}
/// Returns all elements in the hash table.
pub fn elements(&self, process: &mut IntoProcessInstanceArcBox<'_>) -> Result<Vec<D>> {
// TODO: Refactor this.
let blocks_alloc = self.blocks_alloc() as usize;
let peak_alloc = self.peak_count() as usize;
let mut elements: Vec<_> = self
.buckets
.iter()
.flat_map(|bucket| {
let mut cur_element = bucket.first;
let mut allocated_list = Vec::with_capacity(peak_alloc);
let mut unallocated_list = Vec::with_capacity(blocks_alloc);
let mut list = Vec::new();
for bucket in &self.buckets {
let mut cur_element = bucket.first_uncommitted;
while !cur_element.is_null() {
if let Ok(element) = cur_element.read(process) {
if !element.data.is_null() {
list.push(element.data);
}
while !cur_element.is_null() {
let element = cur_element.read(process)?;
cur_element = element.next;
}
if !element.data.is_null() {
unallocated_list.push(element.data);
}
list
})
.collect();
if let Ok(blob) = self.blobs.read(process) {
let mut unallocated_data = blob.next;
if !unallocated_data.is_null() {
if !blob.data.is_null() {
elements.push(blob.data);
// Check if we have too many elements.
if unallocated_list.len() >= blocks_alloc {
break;
}
while !unallocated_data.is_null() {
if let Ok(element) = unallocated_data.read(process) {
if !element.data.is_null() {
elements.push(element.data);
}
unallocated_data = element.next;
}
}
cur_element = element.next;
}
}
Ok(elements)
let mut cur_blob =
Pointer64::<HashAllocatedBlob<D>>::from(self.entry_mem.free_list_head.address());
while !cur_blob.is_null() {
let blob = cur_blob.read(process)?;
if !blob.data.is_null() {
allocated_list.push(blob.data);
}
// Check if we have too many elements.
if allocated_list.len() >= peak_alloc {
break;
}
cur_blob = blob.next;
}
Ok(if unallocated_list.len() > allocated_list.len() {
unallocated_list
} else {
allocated_list
})
}
}
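Both passes in the new elements() walk a next-pointer chain, keep non-null payloads, and bail out once a known bound (blocks_alloc or peak_alloc) is hit so a stale chain cannot spin forever; the longer of the two candidate lists is then returned. A standalone sketch of that traversal shape, with owned nodes standing in for the remote Pointer64 reads:

struct Node {
    data: Option<u64>,
    next: Option<Box<Node>>,
}

fn collect_chain(mut cur: Option<&Node>, cap: usize) -> Vec<u64> {
    let mut out = Vec::with_capacity(cap);
    while let Some(node) = cur {
        if let Some(data) = node.data {
            out.push(data);
        }
        // Mirrors the "check if we have too many elements" caps above.
        if out.len() >= cap {
            break;
        }
        cur = node.next.as_deref();
    }
    out
}

fn main() {
    let chain = Node {
        data: Some(1),
        next: Some(Box::new(Node {
            data: None,
            next: Some(Box::new(Node { data: Some(3), next: None })),
        })),
    };
    assert_eq!(collect_chain(Some(&chain), 8), vec![1, 3]);
}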