Commit 0699f971 authored by Cianciosa, Mark's avatar Cianciosa, Mark
Browse files

Formatting fixes; no functionality changes. Remove unneeded namespace qualifier in model builder.

parent d5665612
Loading
Loading
Loading
Loading
+15.7 KiB (172 KiB)

File changed.

No diff preview for this file type.

+5 −0
Original line number Diff line number Diff line
@@ -4,6 +4,11 @@
<dict>
	<key>SchemeUserState</key>
	<dict>
		<key>ml_embedder_test.xcscheme_^#shared#^_</key>
		<dict>
			<key>orderHint</key>
			<integer>0</integer>
		</dict>
		<key>ml_embeder_test.xcscheme_^#shared#^_</key>
		<dict>
			<key>orderHint</key>
+4 −2
Original line number Diff line number Diff line
@@ -100,9 +100,11 @@ namespace ml_embedder {
                    buffer<T> read(const size_t size) {
                        buffer<T> b(size);
                        if constexpr (std::is_same<T, float>::value) {
                            check_error(H5Dread(d, H5T_NATIVE_FLOAT, H5S_ALL, H5S_ALL, 0, b.data()));
                            check_error(H5Dread(d, H5T_NATIVE_FLOAT, H5S_ALL,
                                                H5S_ALL, 0, b.data()));
                        } else {
                            check_error(H5Dread(d, H5T_NATIVE_DOUBLE, H5S_ALL, H5S_ALL, 0, b.data()));
                            check_error(H5Dread(d, H5T_NATIVE_DOUBLE, H5S_ALL,
                                                H5S_ALL, 0, b.data()));
                        }
                        return b;
                    }
+15 −13
Original line number Diff line number Diff line
@@ -87,31 +87,33 @@ namespace ml_embedder {
                  const int input_size,
                  function &last) {
                const int width = config["units"];
                ml_embedder::hdf5::file::group vars = layer.open_group("vars");
                ml_embedder::hdf5::file::group::dataset weigths = vars.open_dataset("0");
                hdf5::file::group vars = layer.open_group("vars");
                hdf5::file::group::dataset weigths = vars.open_dataset("0");

                mlx::core::array weight = mlx::core::zeros({0});
                if (config["dtype"]["config"]["name"] == "float32") {
                    ml_embedder::hdf5::file::group::dataset::buffer<float> wb = weigths.template read<float> (input_size*width);
                    hdf5::file::group::dataset::buffer<float> wb = weigths.template read<float> (input_size*width);
                    weight = mlx::core::array(wb.data(), {width, input_size});
                } else {
                    ml_embedder::hdf5::file::group::dataset::buffer<double> wb = weigths.template read<double> (input_size*width);
                    weight = mlx::core::array(wb.data(), {width, input_size}, mlx::core::float64);
                    hdf5::file::group::dataset::buffer<double> wb = weigths.template read<double> (input_size*width);
                    weight = mlx::core::array(wb.data(), {width, input_size},
                                              mlx::core::float64);
                }

                last = mlx::core::compile([last, weight](const std::vector<mlx::core::array> &in) {
                    return std::vector<mlx::core::array> ({mlx::core::einsum("ij,...j->...i", {weight, last({in[0]})[0]})});
                    return std::vector<mlx::core::array> ({mlx::core::einsum("ij,...j->...i",
                                                                             {weight, last({in[0]})[0]})});
                });
                
                if (config["use_bias"]) {
                    ml_embedder::hdf5::file::group::dataset biass = vars.open_dataset("1");
                    hdf5::file::group::dataset biass = vars.open_dataset("1");

                    mlx::core::array bias = mlx::core::zeros({0});
                    if (config["dtype"]["config"]["name"] == "float32") {
                        ml_embedder::hdf5::file::group::dataset::buffer<float> bb = biass.template read<float> (width);
                        hdf5::file::group::dataset::buffer<float> bb = biass.template read<float> (width);
                        bias = mlx::core::array(bb.data(), {width});
                    } else {
                        ml_embedder::hdf5::file::group::dataset::buffer<double> bb = biass.template read<double> (width);
                        hdf5::file::group::dataset::buffer<double> bb = biass.template read<double> (width);
                        bias = mlx::core::array(bb.data(), {width}, mlx::core::float64);
                    }

@@ -139,16 +141,16 @@ namespace ml_embedder {
        keras(const std::string model_file) {
            initalize();
            
            ml_embedder::zip zip(model_file);
            zip zip(model_file);
            const nlohmann::json config =
                nlohmann::json::parse(zip.get_file("config.json").get_buffer());

            if (config["class_name"] == "Sequential") {
                ml_embedder::zip::file::buffer weights_file =
                zip::file::buffer weights_file =
                    zip.get_file("model.weights.h5").get_buffer();
                ml_embedder::hdf5::file hf(weights_file.data(),
                hdf5::file hf(weights_file.data(),
                                          weights_file.size());
                ml_embedder::hdf5::file::group layers = hf.open_group("layers");
                hdf5::file::group layers = hf.open_group("layers");

//  Start with a place holder layer.
                func = mlx::core::compile([](inputs in) {
+2 −1
Original line number Diff line number Diff line
@@ -27,7 +27,8 @@ int main(int argc, const char * argv[]) {
        totalsize *= in_shape[i];
    }
    float *input_buffer = (float *)malloc(sizeof(float)*totalsize);
    for (float *pos = input_buffer, *end = input_buffer + totalsize; pos < end; pos++) {
    for (float *pos = input_buffer, *end = input_buffer + totalsize;
         pos < end; pos++) {
        pos[0] = 1.0;
    }

+1 −1

File changed.

Contains only whitespace changes.

Loading