issue/896 - Add print functions for tensors in C++ and Python #930
base: main
Changes from all commits
Changes to the package-level imports and `__all__`:

```diff
@@ -2,6 +2,7 @@
 import infinicore.context as context
 import infinicore.nn as nn
+from infinicore._tensor_str import printoptions, set_printoptions

 # Import context functions
 from infinicore.context import (
@@ -134,6 +135,8 @@
     "strided_empty",
     "strided_from_blob",
     "zeros",
+    "set_printoptions",
+    "printoptions",
 ]

 use_ntops = False
```

Review comment (Collaborator, PR author), on the newly exported `set_printoptions` / `printoptions`: these two functions also exist in torch.
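For reference, a minimal illustration of the torch counterparts the comment refers to (assuming a standard PyTorch installation); the new infinicore exports are intended to mirror this interface:

```python
import torch

# Global setting: all subsequent tensor printing uses 2 decimal digits.
torch.set_printoptions(precision=2)
print(torch.tensor([1.12345]))  # tensor([1.12])

# Scoped setting: the previous options are restored when the block exits.
with torch.printoptions(precision=6, sci_mode=False):
    print(torch.tensor([1.12345]))  # tensor([1.123450])
```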
New file `_tensor_str.py` (114 lines added, `@@ -0,0 +1,114 @@`):

```python
import contextlib
import dataclasses
from typing import Any, Optional

from infinicore.lib import _infinicore


@dataclasses.dataclass
class __PrinterOptions:
    precision: int = 4
    threshold: float = 1000
    edgeitems: int = 3
    linewidth: int = 80
    sci_mode: Optional[bool] = None


PRINT_OPTS = __PrinterOptions()


def set_printoptions(
    precision=None,
    threshold=None,
    edgeitems=None,
    linewidth=None,
    profile=None,
    sci_mode=None,
):
    r"""Set options for printing.

    Args:
        precision: Number of digits of precision for floating point output (default = 4).
        threshold: Total number of array elements which trigger summarization rather than full `repr` (default = 1000).
        edgeitems: Number of array items in summary at beginning and end of each dimension (default = 3).
        linewidth: The number of characters per line (default = 80).
        profile: Sane defaults for pretty printing. Can override with any of the above options. (any one of `default`, `short`, `full`)
        sci_mode: Enable (True) or disable (False) scientific notation.
            If None (default) is specified, the value is automatically chosen by the framework.

    Example::
        >>> # Limit the precision of elements
        >>> torch.set_printoptions(precision=2)
        >>> torch.tensor([1.12345])
        tensor([1.12])
    """
    if profile is not None:
        if profile == "default":
            PRINT_OPTS.precision = 4
            PRINT_OPTS.threshold = 1000
            PRINT_OPTS.edgeitems = 3
            PRINT_OPTS.linewidth = 80
        elif profile == "short":
            PRINT_OPTS.precision = 2
            PRINT_OPTS.threshold = 1000
            PRINT_OPTS.edgeitems = 2
            PRINT_OPTS.linewidth = 80
        elif profile == "full":
            PRINT_OPTS.precision = 4
            PRINT_OPTS.threshold = 2147483647  # CPP_INT32_MAX
            PRINT_OPTS.edgeitems = 3
            PRINT_OPTS.linewidth = 80
        else:
            raise ValueError(
                f"Invalid profile: {profile}. the profile must be one of 'default', 'short', 'full'"
            )

    if precision is not None:
        PRINT_OPTS.precision = precision
    if threshold is not None:
        PRINT_OPTS.threshold = threshold
    if edgeitems is not None:
        PRINT_OPTS.edgeitems = edgeitems
    if linewidth is not None:
        PRINT_OPTS.linewidth = linewidth
    PRINT_OPTS.sci_mode = sci_mode

    _infinicore.set_printoptions(
        PRINT_OPTS.precision,
        PRINT_OPTS.threshold,
        PRINT_OPTS.edgeitems,
        PRINT_OPTS.linewidth,
        PRINT_OPTS.sci_mode,
    )


def get_printoptions() -> dict[str, Any]:
    r"""Gets the current options for printing, as a dictionary that
    can be passed as ``**kwargs`` to set_printoptions().
    """
    return dataclasses.asdict(PRINT_OPTS)


@contextlib.contextmanager
def printoptions(
    precision=None, threshold=None, edgeitems=None, linewidth=None, sci_mode=None
):
    r"""Context manager that temporarily changes the print options."""
    old_kwargs = get_printoptions()
    set_printoptions(
        precision=precision,
        threshold=threshold,
        edgeitems=edgeitems,
        linewidth=linewidth,
        sci_mode=sci_mode,
    )
    try:
        yield
    finally:
        set_printoptions(**old_kwargs)


def _str(self):
    cpp_tensor_str = self._underlying.__str__()
    py_dtype_str = "dtype=" + self.dtype.__repr__()
    return cpp_tensor_str.split("dtype=INFINI.")[0] + py_dtype_str + ")\n"
```
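A short usage sketch of the options API defined above. It only exercises functions shown in this diff and assumes the package built from this PR (including the `_infinicore.set_printoptions` binding) is importable; the printed dict is inferred from the code:

```python
import infinicore
from infinicore._tensor_str import get_printoptions  # not re-exported in __all__

# Apply the "short" profile (precision=2, edgeitems=2), then raise the precision.
infinicore.set_printoptions(profile="short")
infinicore.set_printoptions(precision=6)

# The current options round-trip as a dict that set_printoptions() accepts again.
saved = get_printoptions()
print(saved)  # {'precision': 6, 'threshold': 1000, 'edgeitems': 2, 'linewidth': 80, 'sci_mode': None}

# Temporarily force scientific notation; tensors printed inside the block use it.
with infinicore.printoptions(sci_mode=True, precision=3):
    pass  # print(some_tensor) here would use the temporary options

# On exit the previous options are restored.
assert get_printoptions() == saved
```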
Changes to src/infinicore-test/test_nn_module.cc:

Review comment (Collaborator, PR author): A version of the C++ Linear removed one of its functions, which broke compilation of the C++ tests. The argument that was being passed was therefore commented out or removed in src/infinicore-test/test_nn_module.cc.

```diff
@@ -898,8 +898,8 @@ TestResult NNModuleTest::testModuleLinear() {
     // Test forward with residual connection
     spdlog::info("Testing Linear forward with residual connection");
-    auto residual = infinicore::Tensor::ones({2, 4}, infinicore::DataType::F32, infinicore::Device());
-    auto output_with_residual = m1.forward(input1, residual);
+    // auto residual = infinicore::Tensor::ones({2, 4}, infinicore::DataType::F32, infinicore::Device());
+    auto output_with_residual = m1.forward(input1);
     if (output_with_residual->shape() != std::vector<size_t>({2, 4})) {
         spdlog::error("Linear output with residual shape mismatch. Expected {{2, 4}}, got different shape");
         return false;
@@ -911,10 +911,10 @@ TestResult NNModuleTest::testModuleLinear() {
     // Create test data with known values for verification
     auto test_input = infinicore::Tensor::ones({2, 8}, infinicore::DataType::F32, infinicore::Device());
-    auto test_residual = infinicore::Tensor::ones({2, 4}, infinicore::DataType::F32, infinicore::Device());
+    // auto test_residual = infinicore::Tensor::ones({2, 4}, infinicore::DataType::F32, infinicore::Device());

     // Get InfiniCore result
-    auto infinicore_output = m1.forward(test_input, test_residual);
+    auto infinicore_output = m1.forward(test_input);

     // Compute naive result: output = input @ weight.T + bias + residual
     auto naive_output = infinicore::Tensor::empty({2, 4}, infinicore::DataType::F32, infinicore::Device());
@@ -935,7 +935,7 @@ TestResult NNModuleTest::testModuleLinear() {
     infinicore::op::add_(naive_output, matmul_result, bias_view);

     // Add residual
-    infinicore::op::add_(naive_output, naive_output, test_residual);
+    // infinicore::op::add_(naive_output, naive_output, test_residual);

     // Compare results with actual value checking
     if (infinicore_output->shape() != naive_output->shape()) {
@@ -956,10 +956,10 @@ TestResult NNModuleTest::testModuleLinear() {
     // Test computation correctness without bias (using m2)
     spdlog::info("Testing computation correctness without bias");
     auto test_input_no_bias = infinicore::Tensor::ones({1, 16}, infinicore::DataType::F32, infinicore::Device());
-    auto test_residual_no_bias = infinicore::Tensor::ones({1, 3}, infinicore::DataType::F32, infinicore::Device());
+    // auto test_residual_no_bias = infinicore::Tensor::ones({1, 3}, infinicore::DataType::F32, infinicore::Device());

     // Get InfiniCore result (no bias)
-    auto infinicore_output_no_bias = m2.forward(test_input_no_bias, test_residual_no_bias);
+    auto infinicore_output_no_bias = m2.forward(test_input_no_bias);

     // Compute naive result without bias: output = input @ weight.T + residual
     auto naive_output_no_bias = infinicore::Tensor::empty({1, 3}, infinicore::DataType::F32, infinicore::Device());
@@ -970,7 +970,7 @@ TestResult NNModuleTest::testModuleLinear() {
     auto matmul_result_no_bias = infinicore::op::matmul(test_input_no_bias, weight_t_no_bias); // [1, 3]

     // Add residual
-    infinicore::op::add_(naive_output_no_bias, matmul_result_no_bias, test_residual_no_bias);
+    // infinicore::op::add_(naive_output_no_bias, matmul_result_no_bias, test_residual_no_bias);

     // Compare results with actual value checking
     if (infinicore_output_no_bias->shape() != naive_output_no_bias->shape()) {
```
Review comment: torch keeps some of its print-related helpers in a _tensor_str.py file, so a new _tensor_str module was created here as well.
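The diff defines `_str` but does not show where it is bound to the Python tensor class; that wiring is outside the changed hunks. Below is a minimal, self-contained sketch of the splice `_str` performs, using hypothetical `MockUnderlying`/`MockTensor` stand-ins for the real infinicore objects:

```python
class MockUnderlying:
    """Stand-in for the C++ tensor; its string ends with a dtype=INFINI.<TYPE>) suffix."""

    def __str__(self):
        return "Tensor([[1., 1.],\n        [1., 1.]], dtype=INFINI.F32)"


class MockTensor:
    """Stand-in for the Python tensor wrapper that _str expects as `self`."""

    def __init__(self):
        self._underlying = MockUnderlying()
        self.dtype = "infinicore.float32"  # stand-in for the Python-side dtype object


def _str(self):
    # Same splice as the new helper: keep the C++ body, replace the C++ dtype
    # suffix with the Python-side dtype text.
    cpp_tensor_str = self._underlying.__str__()
    py_dtype_str = "dtype=" + self.dtype.__repr__()
    return cpp_tensor_str.split("dtype=INFINI.")[0] + py_dtype_str + ")\n"


MockTensor.__str__ = _str
print(MockTensor())
# Tensor([[1., 1.],
#         [1., 1.]], dtype='infinicore.float32')
```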