Merge pull request #86570 from RandomShaper/remove_traces_of_rid_prealloc

Remove traces of the extinct RID preallocate feature
akien-mga committed Jan 2, 2024
2 parents 3e67dc1 + c217041 commit 14b1bb4
Showing 6 changed files with 0 additions and 10 deletions.
1 change: 0 additions & 1 deletion core/config/project_settings.cpp
@@ -1462,7 +1462,6 @@ ProjectSettings::ProjectSettings() {
GLOBAL_DEF("debug/settings/crash_handler/message.editor",
String("Please include this when reporting the bug on: https://github.com/godotengine/godot/issues"));
GLOBAL_DEF_RST(PropertyInfo(Variant::INT, "rendering/occlusion_culling/bvh_build_quality", PROPERTY_HINT_ENUM, "Low,Medium,High"), 2);
-GLOBAL_DEF(PropertyInfo(Variant::INT, "memory/limits/multithreaded_server/rid_pool_prealloc", PROPERTY_HINT_RANGE, "0,500,1"), 60); // No negative and limit to 500 due to crashes.
GLOBAL_DEF_RST("internationalization/rendering/force_right_to_left_layout_direction", false);
GLOBAL_DEF_BASIC(PropertyInfo(Variant::INT, "internationalization/rendering/root_node_layout_direction", PROPERTY_HINT_ENUM, "Based on Application Locale,Left-to-Right,Right-to-Left,Based on System Locale"), 0);

3 changes: 0 additions & 3 deletions doc/classes/ProjectSettings.xml
@@ -1993,9 +1993,6 @@
<member name="memory/limits/message_queue/max_size_mb" type="int" setter="" getter="" default="32">
Godot uses a message queue to defer some function calls. If you run out of space on it (you will see an error), you can increase the size here.
</member>
-<member name="memory/limits/multithreaded_server/rid_pool_prealloc" type="int" setter="" getter="" default="60">
-This is used by servers when used in multi-threading mode (servers and visual). RIDs are preallocated to avoid stalling the server requesting them on threads. If servers get stalled too often when loading resources in a thread, increase this number.
-</member>
<member name="navigation/2d/default_cell_size" type="float" setter="" getter="" default="1.0">
Default cell size for 2D navigation maps. See [method NavigationServer2D.map_set_cell_size].
</member>
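Note: a project that still carries an override for the removed setting in its project.godot is unaffected beyond the entry becoming unused, since nothing defines or reads it after this commit. As a purely hypothetical illustration (the section/key split follows the usual project.godot layout; the value 100 is made up), such an override would have looked like:

[memory]

limits/multithreaded_server/rid_pool_prealloc=100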
2 changes: 0 additions & 2 deletions servers/physics_server_2d_wrap_mt.cpp
@@ -120,8 +120,6 @@ PhysicsServer2DWrapMT::PhysicsServer2DWrapMT(PhysicsServer2D *p_contained, bool
physics_server_2d = p_contained;
create_thread = p_create_thread;

-pool_max_size = GLOBAL_GET("memory/limits/multithreaded_server/rid_pool_prealloc");
-
if (!p_create_thread) {
server_thread = Thread::get_caller_id();
} else {
1 change: 0 additions & 1 deletion servers/physics_server_2d_wrap_mt.h
@@ -66,7 +66,6 @@ class PhysicsServer2DWrapMT : public PhysicsServer2D {
bool first_frame = true;

Mutex alloc_mutex;
-int pool_max_size = 0;

public:
#define ServerName PhysicsServer2D
2 changes: 0 additions & 2 deletions servers/physics_server_3d_wrap_mt.cpp
@@ -120,8 +120,6 @@ PhysicsServer3DWrapMT::PhysicsServer3DWrapMT(PhysicsServer3D *p_contained, bool
physics_server_3d = p_contained;
create_thread = p_create_thread;

-pool_max_size = GLOBAL_GET("memory/limits/multithreaded_server/rid_pool_prealloc");
-
if (!p_create_thread) {
server_thread = Thread::get_caller_id();
} else {
1 change: 0 additions & 1 deletion servers/physics_server_3d_wrap_mt.h
@@ -65,7 +65,6 @@ class PhysicsServer3DWrapMT : public PhysicsServer3D {
bool first_frame = true;

Mutex alloc_mutex;
-int pool_max_size = 0;

public:
#define ServerName PhysicsServer3D
