ggml : sync ggml core (minor additions, e.g. ggml_get_tensor_by_name())
Showing 2 changed files with 48 additions and 10 deletions.
ggml.h:

```diff
@@ -198,6 +198,7 @@
 #define GGML_MAX_PARAMS        256
 #define GGML_MAX_CONTEXTS      64
 #define GGML_MAX_OPT           4
+#define GGML_MAX_NAME          32
 #define GGML_DEFAULT_N_THREADS 4

 #define GGML_ASSERT(x) \
```
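`GGML_MAX_NAME` is the bound callers should respect when filling a tensor's name buffer. A minimal sketch; `set_tensor_name` is a hypothetical helper, not part of the ggml API:

```c
#include <stdio.h>

#include "ggml.h"

// Hypothetical helper (not part of the ggml API): copy a label into a
// tensor's fixed-size name buffer. snprintf() guarantees NUL-termination
// and silently truncates labels longer than GGML_MAX_NAME - 1 characters.
static void set_tensor_name(struct ggml_tensor * t, const char * label) {
    snprintf(t->name, GGML_MAX_NAME, "%s", label);
}
```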
```diff
@@ -372,11 +373,16 @@ extern "C" {

         void * data;

-        char name[32];
+        char name[GGML_MAX_NAME];

         char padding[16];
     };

+    static const size_t GGML_TENSOR_SIZE = sizeof(struct ggml_tensor);
+
+    // use this to compute the memory overhead of a tensor
+    static const size_t GGML_TENSOR_OVERHEAD = (GGML_OBJECT_SIZE + GGML_TENSOR_SIZE + 16);
+
     // computation graph
     struct ggml_cgraph {
         int n_nodes;
```
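`GGML_TENSOR_OVERHEAD` lets a caller size a context buffer up front instead of guessing. A rough sketch using only the new constant; `estimate_mem` is a hypothetical helper:

```c
#include <stddef.h>

#include "ggml.h"

// Hypothetical helper: estimate the context size needed for n_tensors
// tensors holding data_bytes of payload in total. Each tensor costs
// GGML_TENSOR_OVERHEAD bytes of object + header bookkeeping on top of
// its data.
static size_t estimate_mem(int n_tensors, size_t data_bytes) {
    return (size_t) n_tensors * GGML_TENSOR_OVERHEAD + data_bytes;
}
```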
```diff
@@ -429,6 +435,7 @@ extern "C" {
     GGML_API float ggml_type_sizef(enum ggml_type type); // ggml_type_size()/ggml_blck_size() as float

     GGML_API const char * ggml_type_name(enum ggml_type type);
+    GGML_API const char * ggml_op_name  (enum ggml_op   op);

     GGML_API size_t ggml_element_size(const struct ggml_tensor * tensor);
```
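`ggml_op_name()` pairs naturally with the existing `ggml_type_name()` for debug output. A sketch that walks a graph's nodes; `dump_graph` is a hypothetical helper:

```c
#include <stdio.h>

#include "ggml.h"

// Hypothetical debug helper: print the op and type of every node in a
// graph, using the new ggml_op_name() next to the existing
// ggml_type_name().
static void dump_graph(const struct ggml_cgraph * gf) {
    for (int i = 0; i < gf->n_nodes; ++i) {
        const struct ggml_tensor * t = gf->nodes[i];
        printf("node %3d: op = %-12s type = %s\n",
                i, ggml_op_name(t->op), ggml_type_name(t->type));
    }
}
```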
|
@@ -445,6 +452,7 @@ extern "C" { | |
GGML_API size_t ggml_used_mem(const struct ggml_context * ctx); | ||
|
||
GGML_API size_t ggml_set_scratch(struct ggml_context * ctx, struct ggml_scratch scratch); | ||
GGML_API void ggml_set_no_alloc(struct ggml_context * ctx, bool no_alloc); | ||
|
||
GGML_API struct ggml_tensor * ggml_new_tensor( | ||
struct ggml_context * ctx, | ||
|
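`ggml_set_no_alloc()` switches a context into metadata-only mode, which is useful for measuring a graph's footprint before committing to a real allocation. A sketch under the assumption that `ggml_init_params` already carries a `no_alloc` flag, as it does in ggml of this vintage:

```c
#include <stdio.h>

#include "ggml.h"

// Sketch: probe how much metadata a set of tensors needs without paying
// for their data buffers.
static void probe_metadata_cost(void) {
    struct ggml_init_params params = {
        /*.mem_size   =*/ 16*1024*1024, // assumed budget for the probe
        /*.mem_buffer =*/ NULL,
        /*.no_alloc   =*/ false,
    };
    struct ggml_context * ctx = ggml_init(params);

    ggml_set_no_alloc(ctx, true); // headers only from here on
    struct ggml_tensor * probe = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, 1024);
    (void) probe; // no data buffer was allocated for this tensor

    printf("metadata bytes used: %zu\n", ggml_used_mem(ctx));

    ggml_set_no_alloc(ctx, false); // restore normal allocation
    ggml_free(ctx);
}
```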
```diff
@@ -970,6 +978,8 @@ extern "C" {
     GGML_API void ggml_graph_compute(struct ggml_context * ctx, struct ggml_cgraph * cgraph);
     GGML_API void ggml_graph_reset  (struct ggml_cgraph * cgraph);

+    GGML_API struct ggml_tensor * ggml_get_tensor_by_name(struct ggml_cgraph * cgraph, const char * name);
+
     // print info and performance information for the graph
     GGML_API void ggml_graph_print(const struct ggml_cgraph * cgraph);
```
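Putting the pieces together, `ggml_get_tensor_by_name()` retrieves a labelled node from a built graph. A sketch assuming a caller-provided, already-initialized context and a by-value `ggml_build_forward()` as in ggml of this era; the `"sum"` label and `lookup_example` are illustrative:

```c
#include <stdio.h>

#include "ggml.h"

// Sketch: label a node, build a graph from it, then fetch it back by
// name. Returns NULL from the lookup if no node or leaf carries the name.
static void lookup_example(struct ggml_context * ctx) {
    struct ggml_tensor * a = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, 8);
    struct ggml_tensor * b = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, 8);
    struct ggml_tensor * c = ggml_add(ctx, a, b);

    snprintf(c->name, GGML_MAX_NAME, "%s", "sum");

    struct ggml_cgraph gf = ggml_build_forward(c);

    struct ggml_tensor * found = ggml_get_tensor_by_name(&gf, "sum");
    printf("found: %s\n", found ? ggml_op_name(found->op) : "(not found)");
}
```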
howard0su (Collaborator) commented on the GGML_TENSOR_OVERHEAD addition:

> cuda for some reason does not like this.