Hotfix/suffix buffer size (#19)
* Minor robustness fixes

* Bumping version + notes

* Adding scalar tests and fixed a bug

* Adding to the notes
matajoh authored Feb 8, 2021
1 parent 4d3c358 commit 43fcada
Showing 12 changed files with 54 additions and 11 deletions.
7 changes: 7 additions & 0 deletions CHANGELOG.md
@@ -1,5 +1,12 @@
 # Changelog
 
+## [2021-02-08 - Version 1.2.1](https://github.com/matajoh/libnpy/releases/tag/v1.2.1)
+
+Improvements:
+- Bug fix for scalar tensor reading
+- Bug fix with memstream buffer size at initialization
+- ".npy" will be added to tensor names in NPZ writing if not already present
+
 ## [2021-01-19 - Version 1.2.0](https://github.com/matajoh/libnpy/releases/tag/v1.2.0)
 
 New Features:
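
The last bullet is the user-visible part of this commit. Below is a minimal usage sketch, not library documentation: it assumes npy::tensor can be constructed from a shape vector, that npy::compression_method_t::STORED is a valid enumerator, and that the tensor header is npy/tensor.h, none of which appear in this diff; the onpzstream constructor and write calls mirror test/npz_write.cpp further down.

```cpp
#include <cstdint>

#include "npy/npz.h"
#include "npy/tensor.h" // header name assumed

int main()
{
    // With the suffix fix, entry names are stored with ".npy" whether or not the
    // caller provides it, so both writes should produce "color.npy" and
    // "depth.npy" entries inside example.npz.
    npy::onpzstream npz("example.npz", npy::compression_method_t::STORED,
                        npy::endian_t::LITTLE);
    npz.write("color", npy::tensor<std::uint8_t>({5, 5, 3}));  // suffix appended on write
    npz.write("depth.npy", npy::tensor<float>({5, 5}));        // already suffixed, left alone
    return 0;
}
```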
8 changes: 3 additions & 5 deletions RELEASE_NOTES
@@ -1,6 +1,4 @@
-New Features:
-- Easier indexing (variable argument index method + negative indexes)
-- Easier access to shape
-
 Improvements:
-- Cmake upgraded to "modern" usage, i.e. you use the library by adding `npy::npy` as a link library
+- Bug fix for scalar tensor reading
+- Bug fix with memstream buffer size at initialization
+- ".npy" will be added to tensor names in NPZ writing if not already present
2 changes: 1 addition & 1 deletion VERSION
@@ -1 +1 @@
-1.2.0
+1.2.1
Binary file added assets/test/int32_scalar.npy
10 changes: 9 additions & 1 deletion include/npy/npz.h
@@ -96,7 +96,15 @@ class onpzstream
 
         omemstream output;
         save(output, tensor);
-        this->write_file(filename, std::move(output.buf()));
+
+        std::string suffix = ".npy";
+        std::string name = filename;
+        if(name.size() < 4 || !std::equal(suffix.rbegin(), suffix.rend(), name.rbegin()))
+        {
+            name += ".npy";
+        }
+
+        this->write_file(name, std::move(output.buf()));
     }
 
     /** Write a tensor to the NPZ archive.
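
The guard added above is the usual pre-C++20 "ends_with" idiom: check the length first, then compare the suffix against the tail of the string with reverse iterators. A standalone sketch of just that check (illustrative, not library code):

```cpp
#include <algorithm>
#include <cassert>
#include <string>

// Same reverse-iterator comparison as in onpzstream::write above; the length
// guard keeps std::equal from walking past the start of a short name.
bool ends_with_npy(const std::string &name)
{
    const std::string suffix = ".npy";
    return name.size() >= suffix.size() &&
           std::equal(suffix.rbegin(), suffix.rend(), name.rbegin());
}

int main()
{
    assert(!ends_with_npy("color"));     // no suffix -> ".npy" would be appended
    assert(ends_with_npy("color.npy"));  // suffix present -> name left unchanged
    assert(!ends_with_npy("npy"));       // shorter than ".npy" -> guard applies
    return 0;
}
```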
5 changes: 3 additions & 2 deletions src/memstream.cpp
@@ -1,7 +1,7 @@
 #include "npy/core.h"
 
 namespace {
-const int BUFFER_SIZE = 1;//64 * 1024;
+const int BUFFER_SIZE = 64 * 1024;
 }
 
 namespace npy
@@ -11,8 +11,9 @@ membuf::membuf() : membuf(BUFFER_SIZE)
     this->seekpos(0);
 }
 
-membuf::membuf(size_t n) : m_buffer(n)
+membuf::membuf(size_t n)
 {
+    m_buffer.reserve(BUFFER_SIZE);
     this->seekpos(0);
 }
 
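The constructor change swaps an eagerly sized buffer for reserved capacity. A minimal illustration of the std::vector distinction involved (plain standard C++, not libnpy code):

```cpp
#include <cassert>
#include <vector>

int main()
{
    // Constructing with a count yields n value-initialized elements up front...
    std::vector<char> sized(64 * 1024);
    assert(sized.size() == 64 * 1024);

    // ...whereas reserve() only pre-allocates capacity and leaves the vector
    // empty, which is what the new membuf(size_t) constructor does.
    std::vector<char> reserved;
    reserved.reserve(64 * 1024);
    assert(reserved.empty());
    assert(reserved.capacity() >= 64 * 1024);
    return 0;
}
```
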
5 changes: 5 additions & 0 deletions src/npy.cpp
@@ -37,6 +37,11 @@ void skip_whitespace(std::istream &input)
 
 std::string read_to(std::istream &input, char delim)
 {
+    if(input.peek() == delim)
+    {
+        return "";
+    }
+
     input.get(BUFFER, BUFFER_SIZE, delim);
     auto length = input.gcount();
     assert(length < BUFFER_SIZE);
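
The early return matters because std::istream::get(buffer, count, delim) sets failbit when it stores no characters, which is what happens when the delimiter is already the next character in the stream — presumably the empty shape tuple "()" in a scalar header. A small demonstration of that standard behaviour:

```cpp
#include <cassert>
#include <sstream>

int main()
{
    // get(s, count, delim) stops before the delimiter; if that leaves zero
    // characters stored, the stream is put into a failed state.
    std::istringstream input(")rest");
    char buffer[16];
    input.get(buffer, sizeof(buffer), ')');
    assert(input.gcount() == 0);  // nothing extracted before ')'
    assert(input.fail());         // failbit is set
    return 0;
}
```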
12 changes: 11 additions & 1 deletion test/libnpy_tests.h
@@ -163,7 +163,17 @@ std::string npy_stream(npy::endian_t endianness = npy::endian_t::NATIVE)
     npy::tensor<T> tensor = test_tensor<T>({5, 2, 5});
     npy::save(actual_stream, tensor, endianness);
     return actual_stream.str();
-};
+}
+
+template <typename T>
+std::string npy_scalar_stream(npy::endian_t endianness = npy::endian_t::NATIVE)
+{
+    std::ostringstream actual_stream;
+    npy::tensor<T> tensor = test_tensor<T>({});
+    *tensor.data() = static_cast<T>(42);
+    npy::save(actual_stream, tensor, endianness);
+    return actual_stream.str();
+}
 
 template <typename T>
 std::string npy_fortran_stream(npy::endian_t endianness = npy::endian_t::NATIVE)
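
For context, test_tensor<T>({}) builds a rank-0 tensor: per the NPY format its header records an empty shape tuple ('shape': ()), and its element count is the empty product, i.e. one, so there is exactly one slot to hold the value 42. A tiny sanity check of that counting rule (plain standard C++, independent of libnpy):

```cpp
#include <cassert>
#include <cstddef>
#include <functional>
#include <numeric>
#include <vector>

int main()
{
    // An empty shape means rank 0; the number of elements is the product over
    // an empty range, which std::accumulate evaluates to the initial value 1.
    std::vector<std::size_t> shape;  // rank-0, as in test_tensor<T>({})
    std::size_t count = std::accumulate(shape.begin(), shape.end(),
                                        std::size_t{1},
                                        std::multiplies<std::size_t>());
    assert(count == 1);
    return 0;
}
```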
1 change: 1 addition & 0 deletions test/npy_read.cpp
@@ -13,6 +13,7 @@ int test_npy_read()
     test_read<std::uint32_t>(result, "uint32");
     test_read<std::int32_t>(result, "int32");
     test_read<std::int32_t>(result, "int32_big");
+    test_read_scalar<std::int32_t>(result, "int32_scalar");
     test_read<std::uint64_t>(result, "uint64");
     test_read<std::int64_t>(result, "int64");
     test_read<float>(result, "float32");
9 changes: 9 additions & 0 deletions test/npy_read.h
@@ -18,4 +18,13 @@ void test_read(int &result, const std::string &name, bool fortran_order = false)
     test::assert_equal(expected, actual, result, "npy_read_" + name);
 }
 
+template <typename T>
+void test_read_scalar(int &result, const std::string &name)
+{
+    npy::tensor<T> expected = test::test_tensor<T>({});
+    *expected.data() = static_cast<T>(42);
+    npy::tensor<T> actual = npy::load<T, npy::tensor>(test::asset_path(name + ".npy"));
+    test::assert_equal(expected, actual, result, "npy_read_" + name);
+}
+
 #endif
4 changes: 4 additions & 0 deletions test/npy_write.cpp
@@ -43,6 +43,10 @@ int test_npy_write()
     actual = test::npy_stream<std::int32_t>(npy::endian_t::BIG);
     test::assert_equal(expected, actual, result, "npy_write_int32_big");
 
+    expected = test::read_asset("int32_scalar.npy");
+    actual = test::npy_scalar_stream<std::int32_t>(npy::endian_t::LITTLE);
+    test::assert_equal(expected, actual, result, "npy_write_int32_scalar");
+
     expected = test::read_asset("uint64.npy");
     actual = test::npy_stream<std::uint64_t>(npy::endian_t::LITTLE);
     test::assert_equal(expected, actual, result, "npy_write_uint64");
2 changes: 1 addition & 1 deletion test/npz_write.cpp
@@ -28,7 +28,7 @@ void _test(int &result, npy::compression_method_t compression_method)
 
     {
         npy::onpzstream npz(TEMP_NPZ, compression_method, npy::endian_t::LITTLE);
-        npz.write("color.npy", test::test_tensor<std::uint8_t>({5, 5, 3}));
+        npz.write("color", test::test_tensor<std::uint8_t>({5, 5, 3}));
         npz.write("depth.npy", test::test_tensor<float>({5, 5}));
     }
 
