tcwicks
@tcwicks
@Craigjw Here you go - pasted across 3 messages:
First the helper function that does the equivalent of Keras Dense:

public static Tensor FullyConnectedDense(Tensor _Input, int _Num_Units, TF_DataType dataType = TF_DataType.TF_FLOAT, bool Trainable = true, bool Normalize = false,
    IInitializer InitializeVariant = null, string _Name = null)
{
    // Default to a truncated normal initializer when none is supplied.
    if (InitializeVariant == null)
    {
        InitializeVariant = tf.truncated_normal_initializer(0, 1);
    }
    int in_dim = (int)_Input.shape[1];
    string WeightsName;
    string BiasName;
    WeightsName = _Name.ConcatIfNotNullOrEmptyElseNull(@"_Weights");
    BiasName = _Name.ConcatIfNotNullOrEmptyElseNull(@"_Bias");

    // -1 leaves the dimension unknown until the graph is fed.
    if (in_dim == 0)
    {
        in_dim = -1;
    }
    ResourceVariable Weights;
    ResourceVariable Bias;
    // Weight matrix: (input features, units).
    Weights = tf.Variable(
        InitializeVariant,
        name: WeightsName,
        dtype: dataType,
        shape: new int[2] { in_dim, _Num_Units },
        trainable: Trainable,
        validate_shape: false);

    // Bias vector: one per unit.
    Bias = tf.Variable(
        InitializeVariant,
        name: BiasName,
        dtype: dataType,
        shape: new int[1] { _Num_Units },
        trainable: Trainable,
        validate_shape: false);
    // The Dense computation itself: y = x * W + b.
    Tensor layer = tf.matmul(_Input, Weights) + Bias;

    return layer;
}
@Craigjw First half of the actual method:

public static void BuildTrainExample(bool IsNormalizing)
{
    //These values are not normalized so they will perform badly.
    //Unless they were intended as categorical classes
    //in which case they should be 1 Hot encoded instead of normalized
    NDArray InputData = np.array(-7.0f, -4.0f, -1.0f, 2.0f, 5.0f, 8.0f, 11.0f, 14.0f,
        -7.0f, -4.0f, -1.0f, 2.0f, 5.0f, 8.0f, 11.0f, 14.0f,
        -7.0f, -4.0f, -1.0f, 2.0f, 5.0f, 8.0f, 11.0f, 14.0f).reshape(new Shape(3, 8));
    NDArray OutputLabels = np.array(3.0f, 6.0f, 9.0f, 12.0f, 15.0f, 18.10f, 21.0f, 24.0f,
        3.0f, 6.0f, 9.0f, 12.0f, 15.0f, 18.10f, 21.0f, 24.0f,
        3.0f, 6.0f, 9.0f, 12.0f, 15.0f, 18.10f, 21.0f, 24.0f).reshape(new Shape(3, 8));

    // Have to do this early: TF2 is eager by default, and placeholders cannot be used in eager mode.
    tf.compat.v1.disable_eager_execution();
    Graph MainGraph = new Graph().as_default(); // To reset the graph you can call tf.reset_default_graph();

    Tensor InputPlaceHolder = tf.placeholder(tf.float32, shape: new int[2] { -1, (int)InputData.shape[1] }, name: "Input");
    Tensor LabelPlaceHolder = tf.placeholder(tf.float32, shape: new int[2] { -1, (int)OutputLabels.shape[1] }, name: "Labels");

    int NormalizationScaleOrNumClasses = 24;
    Tensor NormalizationFactor = tf.constant((float)NormalizationScaleOrNumClasses, TF_DataType.TF_FLOAT);
    Tensor LabelsNormalized = tf.div(LabelPlaceHolder, NormalizationFactor);

    int NumInputVectors = (int)InputData.shape[1];
    int NumOutputVectors = (int)OutputLabels.shape[1];

    Tensor InputRemap;


    Tensor DenseLayerLogits;
    Tensor DenseLayerActivated;
    Tensor DenseLayerFinal;
    Tensor Loss_MSELoss;
    Tensor Loss_SSELoss;
    if (IsNormalizing)
    {
        InputRemap = tf.div(InputPlaceHolder, NormalizationFactor);
        DenseLayerLogits = FullyConnectedDense(InputRemap, NumOutputVectors);
        DenseLayerActivated = tf.nn.leaky_relu(DenseLayerLogits);
        //or whatever other activation
        //Tensor Activation = tf.nn.sigmoid(DenseLayer);
        //Tensor Activation = tf.nn.relu(DenseLayer);
        //Tensor Activation = tf.nn.tanh(DenseLayer);
    }
    else
    {
        //Instead, if you were doing 1-hot:
        //(Note: indices outside [0, depth), like the negative inputs here, one-hot to all-zero rows.)
        InputRemap = tf.one_hot(InputPlaceHolder, NormalizationScaleOrNumClasses);
        InputRemap = tf.reshape(InputRemap, new int[2] { -1, (NumInputVectors * NormalizationScaleOrNumClasses) });
        //This will not work because argmax has no gradient implemented here, so it breaks the optimizer / gradient flow:
        //DenseLayerLogits = FullyConnectedDense(InputRemap, NumOutputVectors * NormalizationScaleOrNumClasses);
        //DenseLayerActivated = tf.nn.sigmoid(DenseLayerLogits);
        //DenseLayerActivated = tf.reshape(DenseLayerActivated, new int[3] { -1, NumInputVectors, NormalizationScaleOrNumClasses });
        //DenseLayerActivated = tf.arg_max(DenseLayerActivated, 2);
        //DenseLayerActivated = tf.cast(DenseLayerActivated, TF_DataType.TF_FLOAT);

        DenseLayerLogits = FullyConnectedDense(InputRemap, NumOutputVectors);
        DenseLayerActivated = tf.nn.leaky_relu(DenseLayerLogits);

    }
@Craigjw And here is the second half of the method:

    Tensor LearningRate = tf.placeholder(tf.float32, shape: new int[0], name: "LearningRate");

    //MSE Loss
    Loss_MSELoss = tf.reshape(tf.reduce_mean(tf.square(LabelsNormalized - DenseLayerActivated), axis: 1), new int[2] { -1, 1 });
    //SSE Loss
    Loss_SSELoss = tf.reshape(tf.reduce_sum(tf.square(LabelsNormalized - DenseLayerActivated), axis: 1), new int[2] { -1, 1 });
    Operation NetworkOptimizer = new Tensorflow.Train.AdamOptimizer(LearningRate).minimize(Loss_MSELoss);
    //Operation NetworkOptimizer = new Tensorflow.Train.AdamOptimizer(LearningRate).minimize(Loss_SSELoss);

    Operation Init = tf.global_variables_initializer();


    //various Config option examples
    var TFConfig = new ConfigProto();
    TFConfig.GpuOptions = new GPUOptions();
    TFConfig.GpuOptions.AllowGrowth = true; //Prevents Tensorflow swallowing all GPU memory

    //TFConfig.GpuOptions.PerProcessGpuMemoryFraction = 20;
    //TFConfig.GpuOptions.Experimental = new GPUOptions.Types.Experimental();
    //TFConfig.GpuOptions.Experimental.UseUnifiedMemory = true;
    //TFConfig.IntraOpParallelismThreads = 10; //C# thread count
    //TFConfig.InterOpParallelismThreads = 2;
    //TFConfig.LogDevicePlacement = true; //Writes a hell of a lot to the console

    //This is how you can grab a reference to all the variables if you want to.
    List<ResourceVariable> AllVars = tf.get_collection<ResourceVariable>(tf.GraphKeys.GLOBAL_VARIABLES);

    Saver TFSaver = tf.train.Saver();

    using (Session Sess = tf.Session(MainGraph, config: TFConfig))
    {
        Sess.run(Init); // Initializes global variables.

        // Only needed if your training code is in some other method somewhere else; here it is already the default graph.
        Sess.graph.as_default();

        //Of course create a proper training loop instead of this for loop that just repeats the same thing.
        for (int Epoch = 1; Epoch <= 20; Epoch++)
        {
            for (int I = 0; I < 50; I++)
            {
                float MyAdjustableLearningRate;
                MyAdjustableLearningRate = 0.001f;

                // Verbose but readable way to build the feed:
                List<FeedItem> FeedList = new List<FeedItem>();
                FeedList.Add((InputPlaceHolder, InputData));
                FeedList.Add((LabelPlaceHolder, OutputLabels));
                FeedList.Add((LearningRate, MyAdjustableLearningRate));
                FeedItem[] FeedArray;
                FeedArray = FeedList.ToArray();

                Sess.run(NetworkOptimizer, FeedArray);

                // Or the shortcut way (note: running both calls performs two optimizer steps per loop iteration):
                Sess.run(NetworkOptimizer, (InputPlaceHolder, InputData), (LabelPlaceHolder, OutputLabels), (LearningRate, MyAdjustableLearningRate));
            }
            float SSELoss, MSELoss;
            (MSELoss, SSELoss) = Sess.run((Loss_MSELoss, Loss_SSELoss), (InputPlaceHolder, InputData), (LabelPlaceHolder, OutputLabels));
            StringBuilder sb;
            sb = new StringBuilder();
            if (IsNormalizing)
            {
                sb.Append(@"Normalize Inputs Version: ");
            }
            else
            {
                sb.Append(@"One Hot Version: ");
            }
            sb.Append(@"Epoch: ").Append(Epoch.ToString(@"00"));
            sb.Append(@" - Iteration: ").Append((Epoch * 50).ToString(@"0000"));
            sb.Append(@" - MSE: ").Append(MSELoss.ToString(@"0.000000000000"));
            sb.Append(@" - SSE: ").Append(SSELoss.ToString(@"0.000000000000"));
            Console.WriteLine(sb.ToString());
        }
    }
}
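
Editor's note: TFSaver is created in the example but never invoked. A minimal hedged sketch of typical usage, inside the session and with a hypothetical checkpoint path:

// After training, still inside the using block:
// TFSaver.save(Sess, @"checkpoints\model.ckpt");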
tcwicks
@tcwicks

@Craigjw Here is what the output is.
Note: results will vary from run to run because the example initializes the weights with a plain truncated normal initializer.

Normalize Inputs Version: Epoch: 01 - Iteration: 0050 - MSE: 0.585179900000 - SSE: 4.681439000000
Normalize Inputs Version: Epoch: 02 - Iteration: 0100 - MSE: 0.454372400000 - SSE: 3.634979000000
Normalize Inputs Version: Epoch: 03 - Iteration: 0150 - MSE: 0.307974100000 - SSE: 2.463793000000
Normalize Inputs Version: Epoch: 04 - Iteration: 0200 - MSE: 0.184113500000 - SSE: 1.472908000000
Normalize Inputs Version: Epoch: 05 - Iteration: 0250 - MSE: 0.104549100000 - SSE: 0.836393200000
Normalize Inputs Version: Epoch: 06 - Iteration: 0300 - MSE: 0.078881500000 - SSE: 0.631052000000
Normalize Inputs Version: Epoch: 07 - Iteration: 0350 - MSE: 0.065381990000 - SSE: 0.523055900000
Normalize Inputs Version: Epoch: 08 - Iteration: 0400 - MSE: 0.055645780000 - SSE: 0.445166200000
Normalize Inputs Version: Epoch: 09 - Iteration: 0450 - MSE: 0.031652850000 - SSE: 0.253222800000
Normalize Inputs Version: Epoch: 10 - Iteration: 0500 - MSE: 0.000863294100 - SSE: 0.006906353000
Normalize Inputs Version: Epoch: 11 - Iteration: 0550 - MSE: 0.000022935460 - SSE: 0.000183483600
Normalize Inputs Version: Epoch: 12 - Iteration: 0600 - MSE: 0.000001567331 - SSE: 0.000012538650
Normalize Inputs Version: Epoch: 13 - Iteration: 0650 - MSE: 0.000000238269 - SSE: 0.000001906155
Normalize Inputs Version: Epoch: 14 - Iteration: 0700 - MSE: 0.000000035869 - SSE: 0.000000286954
Normalize Inputs Version: Epoch: 15 - Iteration: 0750 - MSE: 0.000000004706 - SSE: 0.000000037646
Normalize Inputs Version: Epoch: 16 - Iteration: 0800 - MSE: 0.000000000532 - SSE: 0.000000004254
Normalize Inputs Version: Epoch: 17 - Iteration: 0850 - MSE: 0.000000000051 - SSE: 0.000000000409
Normalize Inputs Version: Epoch: 18 - Iteration: 0900 - MSE: 0.000000000004 - SSE: 0.000000000035
Normalize Inputs Version: Epoch: 19 - Iteration: 0950 - MSE: 0.000000000001 - SSE: 0.000000000009
Normalize Inputs Version: Epoch: 20 - Iteration: 1000 - MSE: 0.000000000000 - SSE: 0.000000000004

One Hot Version: Epoch: 01 - Iteration: 0050 - MSE: 1.136966000000 - SSE: 9.095726000000
One Hot Version: Epoch: 02 - Iteration: 0100 - MSE: 0.663073900000 - SSE: 5.304591000000
One Hot Version: Epoch: 03 - Iteration: 0150 - MSE: 0.416191000000 - SSE: 3.329528000000
One Hot Version: Epoch: 04 - Iteration: 0200 - MSE: 0.265655000000 - SSE: 2.125240000000
One Hot Version: Epoch: 05 - Iteration: 0250 - MSE: 0.059993860000 - SSE: 0.479950800000
One Hot Version: Epoch: 06 - Iteration: 0300 - MSE: 0.027356000000 - SSE: 0.218848000000
One Hot Version: Epoch: 07 - Iteration: 0350 - MSE: 0.018255970000 - SSE: 0.146047700000
One Hot Version: Epoch: 08 - Iteration: 0400 - MSE: 0.000066116740 - SSE: 0.000528933900
One Hot Version: Epoch: 09 - Iteration: 0450 - MSE: 0.000009215314 - SSE: 0.000073722510
One Hot Version: Epoch: 10 - Iteration: 0500 - MSE: 0.000001104103 - SSE: 0.000008832826
One Hot Version: Epoch: 11 - Iteration: 0550 - MSE: 0.000000109968 - SSE: 0.000000879746
One Hot Version: Epoch: 12 - Iteration: 0600 - MSE: 0.000000009018 - SSE: 0.000000072147
One Hot Version: Epoch: 13 - Iteration: 0650 - MSE: 0.000000000602 - SSE: 0.000000004818
One Hot Version: Epoch: 14 - Iteration: 0700 - MSE: 0.000000000035 - SSE: 0.000000000280
One Hot Version: Epoch: 15 - Iteration: 0750 - MSE: 0.000000000004 - SSE: 0.000000000035
One Hot Version: Epoch: 16 - Iteration: 0800 - MSE: 0.000000000002 - SSE: 0.000000000018
One Hot Version: Epoch: 17 - Iteration: 0850 - MSE: 0.000000000001 - SSE: 0.000000000011
One Hot Version: Epoch: 18 - Iteration: 0900 - MSE: 0.000000000001 - SSE: 0.000000000009
One Hot Version: Epoch: 19 - Iteration: 0950 - MSE: 0.000000000001 - SSE: 0.000000000008
One Hot Version: Epoch: 20 - Iteration: 1000 - MSE: 0.000000000001 - SSE: 0.000000000006

tcwicks
@tcwicks

Note: all the example code above is against TensorFlow.NET 0.60.4, which is TensorFlow 2.6.
So just to clarify: when I said earlier that I'm having issues with TensorFlow.NET and had to move across to TorchSharp, that is specific to my use cases, which trigger a rare edge case in the C++ build of TensorFlow.
For most use cases TensorFlow.NET will work just as well as TorchSharp. It all depends on what your use cases require.

Also, in the example code above I put in a few explanations of things that took me a while to figure out (like how to get around the ArgMax issue).
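
Editor's note: for illustration, a minimal sketch of one differentiable stand-in for ArgMax (a "soft argmax"), assuming the usual tf.nn.softmax / tf.range / tf.cast / tf.reduce_sum bindings behave as in Python TensorFlow; DenseLayerLogits refers to the logits tensor from the example above, and NumClasses is a hypothetical class count:

// Instead of tf.arg_max (which has no gradient), weight the class indices
// by their softmax probabilities so gradients can flow through:
int NumClasses = 24; // hypothetical class count
Tensor Probs = tf.nn.softmax(DenseLayerLogits);                          // shape (-1, NumClasses)
Tensor Indices = tf.cast(tf.range(0, NumClasses), TF_DataType.TF_FLOAT); // 0, 1, ..., NumClasses - 1
Tensor SoftArgMax = tf.reduce_sum(Probs * Indices, axis: -1);            // differentiable "class index"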

tcwicks
@tcwicks
Sorry, I missed a little helper method in the example code above:

public static string ConcatIfNotNullOrEmptyElseNull(this string Value, string Append = @"", string Prepend = @"")
{
    if (string.IsNullOrEmpty(Value)) { return null; }
    else { return string.Concat(Prepend, Value, Append); }
}
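
Editor's note: for reference, what the helper returns (the example values are hypothetical):

// "Dense1".ConcatIfNotNullOrEmptyElseNull("_Weights")        -> "Dense1_Weights"
// ((string)null).ConcatIfNotNullOrEmptyElseNull("_Weights")  -> null
// Returning null lets tf.Variable fall back to an auto-generated name.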
behrooz bozorg chami
@behroozbc
Hi,
please help add documentation to this library: https://github.com/SciSharp/Matplotlib.Net
SuperDaveOsbourne
@SuperDaveOsbourne
I see the new Python libraries NVIDIA announced last week have GPU acceleration. What is the chance we see NumPy and the other libraries for .NET get that feature?
NUnitTester
@NUnitTester
Hi there! I'm an absolute beginner and wanted to compute an FFT in C# using TensorFlow.
Tensor t = tf_with(ops.name_scope("MyTest"), scope =>
{
    var x = gen_ops.f_f_t(input);
    return x;
});
I tried this and get a NullReferenceException when f_f_t is called; a handle is not set.
What can I do to solve this? I didn't find any example using FFT... Sorry for asking.
Haiping
@Oceania2018
@NUnitTester tf.signal.fft is not supported yet.
NUnitTester
@NUnitTester
Ah, thanks. When is it planned?
And do you know of a "manual" implementation based on fundamental TensorFlow operations?
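
Editor's note: a minimal sketch of such a manual approach, added for illustration. A discrete Fourier transform can be written as two matrix multiplications with cosine/sine matrices built on the CPU. It assumes only tf.constant and tf.matmul, and that tf.constant accepts a 2-D float array (otherwise wrap it with np.array). It is O(N^2), so it only suits small inputs:

// Re[k] = sum_n x[n] * cos(2*pi*k*n/N);  Im[k] = -sum_n x[n] * sin(2*pi*k*n/N)
int N = 8; // signal length for this toy example
var cosM = new float[N, N];
var sinM = new float[N, N];
for (int k = 0; k < N; k++)
    for (int n = 0; n < N; n++)
    {
        double angle = 2.0 * Math.PI * k * n / N;
        cosM[k, n] = (float)Math.Cos(angle);
        sinM[k, n] = (float)(-Math.Sin(angle));
    }
Tensor cosT = tf.constant(cosM);
Tensor sinT = tf.constant(sinM);
// A toy 8-sample signal as a column vector (N x 1):
Tensor signal = tf.constant(new float[,] { { 1f }, { 2f }, { 3f }, { 4f }, { 5f }, { 6f }, { 7f }, { 8f } });
Tensor realPart = tf.matmul(cosT, signal); // real part of the DFT
Tensor imagPart = tf.matmul(sinT, signal); // imaginary part of the DFT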
uzfm
@uzfm
Problems with image classification:
after uploading images, class_names = null.
The biggest problem is that after training, when I want to predict an image, the answers for the different classes are almost the same.
Maybe someone knows why this problem occurs.
I use the example from ImageClassificationKeras.cs with
SciSharp.Models.ImageClassification 0.3.0
SciSharp.TensorFlow.Redist-Windows-GPU 2.6.0
var imgPath2 = PachDt;
Tensor input2 = ImageUtil.ReadImageFromFile(imgPath2, 100, 100);
var datares2 = model.predict(input2);
var score1 = tf.nn.softmax(datares2[0]);
uzfm
@uzfm
Does anyone know a solution to this image classification problem?
Vitaliy Shakhlin
@vshakhlin
Hello, is it possible to run the predict method outside the main thread?
uzfm
@uzfm

@uzfm Does https://github.com/SciSharp/SciSharp-Stack-Examples/blob/master/src/TensorFlowNET.Examples/ImageProcessing/DigitRecognitionCNN.cs have the same problem?

this example works great,
but there is one drawback:
it can't predict images from an Image or Bitmap.
I tried to create a Tensor from an Image; prediction only works once. If I create a Tensor from the Image a second time, the Tensor is empty. I used var input = tf.data.Dataset.from_tensors(img_Ary).
This example works fine: var input = ImageUtil.ReadImageFromFile(imgPath)
Is it possible to get a Tensor from something other than a file?
Is it possible to make predictions from an Image or Bitmap using "SciSharp.Models.ModelWizard"?

Vitaliy Shakhlin
@vshakhlin
[screenshot: image.png]
If I call predict in a new thread I get a NullReferenceException here: this.CallContext is null.
Haiping
@Oceania2018
@vshakhlin It would be helpful if you could raise an issue and attach a runnable code sample.
uzfm
@uzfm
    static SciSharp.Models.IImageClassificationTask task;
    tensorflow tf = new tensorflow();
    SciSharp.Models.ModelWizard wizard = new SciSharp.Models.ModelWizard();

    public void Predict(string PascImage) {

        var img_bgr = CvInvoke.Imread(PascImage, Emgu.CV.CvEnum.ImreadModes.Color);

        Mat output = new Mat();

        CvInvoke.CvtColor(img_bgr, output, Emgu.CV.CvEnum.ColorConversion.Bgr2Rgb);

        var output1 = output.ToImage<Rgb, byte>().Resize(299, 299, Emgu.CV.CvEnum.Inter.Linear);

        output1.ToUMat().ConvertTo(output, Emgu.CV.CvEnum.DepthType.Cv32F, 1.0f / 255.0f);

        NDArray imgR = np.array(output.GetData(), TF_DataType.TF_FLOAT);

        //keras.backend.clear_session();

        Tensor tensorr = tf.convert_to_tensor(imgR);

        var img_final = tf.expand_dims(tensorr, 0);

        //predict image
        if (task == null) {
            task = wizard.AddImageClassificationTask<TransferLearning>(new SciSharp.Models.TaskOptions {
                DataDir = @"image_classification_v1\flower_photos",
                ModelPath = @"image_classification_v1\saved_model.pb"
            });
        }

        //works well but only from the file
        //var imgPath = Path.Join(PascImage);
        //input = SciSharp.Models.ImageUtil.ReadImageFromFile(imgPath);

        SciSharp.Models.ModelPredictResult result = task.Predict(img_final);
    }

Is it possible to make predictions from an Image or Bitmap using SciSharp.Models.ModelWizard?
Vitaliy Shakhlin
@vshakhlin

@Oceania2018 thank you for the answer. I have a simple class with a few methods: Init() creates my model and loads the weights from a file, and the Predict method loads an image and does the prediction.

        private void ButtonBase_OnClick(object sender, RoutedEventArgs e)
        {
            var recognizeService = new RecognizeService();
            recognizeService.Init(); // create model and load weight from file

            recognizeService.PredictTest(); // this works fine

            new Thread(() =>
            {
                recognizeService.PredictTest(); // this do not work and got NullReferenceException on the Layer.Apply
            }).Start();

            Task.Run(() =>
            {
                recognizeService.PredictTest(); // the same not work and got NullReferenceException on the Layer.Apply
            });
        }

this is just an artificial example that shows the same behaviour. In my main code I observe some events (EventHandler) and want to call the Predict method when those events happen, but unfortunately I get a NullReferenceException. I started wondering why it works when I call the Predict method from the button click but not from the EventHandler, and realized that calling it from inside a new Thread also does not work.
I also attach the Init and PredictTest methods; a possible workaround sketch follows them.

        public void Init()
        {
            _model = keras.Sequential();

            _model.add(layers.InputLayer((240, 240, 3)));
            _model.add(layers.Conv2D(32, (3, 3), padding: "same", activation: "relu"));
            _model.add(layers.Conv2D(16, (3, 3), padding: "same", activation: "relu"));
            _model.add(layers.MaxPooling2D((3, 3)));
            _model.add(layers.Dropout(0.2f));
            _model.add(layers.Flatten());
            _model.add(layers.Dense(128, activation: "relu"));
            _model.add(layers.Dense(256, activation: "relu"));
            _model.add(layers.Dense(14, activation: "softmax"));

            _model.compile(loss: keras.losses.CategoricalCrossentropy(),
                optimizer: keras.optimizers.Adam(0.001f),
                metrics: new[] { "accuracy" });

            _model.load_weights("w2.h5");
        }
        private string PredictTest(string imgPath)
        {
            var imgData = LoadImg(imgPath);
            var newImg = tf.expand_dims(imgData, 0);
            var result = _model.predict(newImg);
            var value = tf.math.argmax(result, 1).numpy()[0];
            return _labels[value];
        }
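
Editor's note: a hedged workaround sketch, not a TensorFlow.NET API. If the library keeps per-thread state such as CallContext, funneling every predict call through one dedicated worker thread sidesteps the NullReferenceException. SingleThreadExecutor below is an illustrative helper, not part of any SciSharp package:

using System;
using System.Collections.Concurrent;
using System.Threading;
using System.Threading.Tasks;

public sealed class SingleThreadExecutor : IDisposable
{
    private readonly BlockingCollection<Action> _queue = new BlockingCollection<Action>();
    private readonly Thread _worker;

    public SingleThreadExecutor()
    {
        // All queued work runs on this one thread, so any thread-local state
        // the model touches is created and reused on the same thread.
        _worker = new Thread(() =>
        {
            foreach (var action in _queue.GetConsumingEnumerable())
                action();
        });
        _worker.IsBackground = true;
        _worker.Start();
    }

    public Task<T> Run<T>(Func<T> func)
    {
        var tcs = new TaskCompletionSource<T>();
        _queue.Add(() =>
        {
            try { tcs.SetResult(func()); }
            catch (Exception ex) { tcs.SetException(ex); }
        });
        return tcs.Task;
    }

    public void Dispose() => _queue.CompleteAdding();
}

// Usage from any event handler or Task:
// var label = await _executor.Run(() => recognizeService.PredictTest(imgPath));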
Vitaliy Shakhlin
@vshakhlin
hello @uzfm, I load my image from a file like this; maybe it will be helpful for you (all my images are 240x240 px). I'm also not sure you need the reshape method; I use it to prepare the Tensor for my Conv2D layer, which has shape (240, 240, 3).
        private NDArray LoadImg(string imgPath)
        {
            var imgWidth = 240;
            var imgHeight = 240;
            using var src = Cv2.ImRead(imgPath, ImreadModes.Color);

            try
            {
                byte[] data = new byte[imgWidth * imgHeight * 3];
                Marshal.Copy(src.Data, data, 0, imgWidth * imgHeight * 3);

                var img = np.array(data);
                // Note: OpenCV Mats are row-major (height first); width and height are
                // equal here, but for non-square images the order should be (height, width, 3).
                img = img.reshape((imgWidth, imgHeight, 3));
                return img;
            }
            catch (Exception e)
            {
                Console.WriteLine(e.Message);
            }

            return null;
        }
Vitaliy Shakhlin
@vshakhlin
@uzfm sorry, I was wrong, it's not helpful for you. You use Emgu CV; I use the OpenCvSharp4 library, which doesn't have a GetData() method on the Mat.
uzfm
@uzfm
I can't create a Tensor after this call:
task = wizard.AddImageClassificationTask<TransferLearning>(new SciSharp.Models.TaskOptions {
    ModelPath = @"image_classification_v1\saved_model.pb"
});
uzfm
@uzfm
I can make one prediction; then I have to clear the session with keras.backend.clear_session(), which makes it slow.
uzfm
@uzfm
This approach produces a Tensor that works well without clearing the session:
var imgPath = Path.Join(PascImage);
input = SciSharp.Models.ImageUtil.ReadImageFromFile(imgPath);
I need to load the picture from something other than a file. Is there such a possibility?
Haiping
@Oceania2018
@uzfm Are you saying img_final is empty on the second prediction?
Can you try var tensorr = tf.constant(imgR) to initialize the tensor?
Vitaliy Shakhlin
@vshakhlin
@uzfm I pass tf.expand_dims not a Tensor but just an NDArray, and it works even if I call LoadImg and Predict several times.
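
Editor's note: a minimal sketch of that workaround, reusing the LoadImg method above (imgPath is hypothetical; the NDArray-to-Tensor conversion is implicit):

// Feed the NDArray straight to expand_dims instead of converting it to a Tensor first:
var imgData = LoadImg(imgPath);            // NDArray of shape (240, 240, 3)
var batched = tf.expand_dims(imgData, 0);  // shape (1, 240, 240, 3)
var result = _model.predict(batched);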
Vitaliy Shakhlin
@vshakhlin
[screenshot: image.png]
@Oceania2018 hello. I cloned the repository and added an additional check: if Value in the CallContext is null, create a new ThreadLocal (changes shown in the screenshot).
This is my test program, making predictions from an Image.
@uzfm "Are you saying img_final is empty on the second prediction?" Yes.
I tried many options but nothing helped.
var tensorr = tf.constant(imgR) also doesn't work.
Haiping
@Oceania2018
@vshakhlin The syntax change seems to result in the same behaviour.
Haiping
@Oceania2018
@uzfm @vshakhlin Just upgrade to v0.3.1 https://www.nuget.org/packages/SciSharp.Models.ImageClassification/0.3.1, it will resolve the prediction issue.
uzfm
@uzfm

@uzfm @vshakhlin Just upgrade to v0.3.1 https://www.nuget.org/packages/SciSharp.Models.ImageClassification/0.3.1, it will resolve the prediction issue.

Thank you very much!!! It works great.
Is it possible to see the result of the prediction for all classes?
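
Editor's note: a small sketch of one way to inspect every class score, building on uzfm's earlier predict snippet; class_names and datares2 come from that snippet and are assumed to be populated, and the scalar cast assumes the usual NDArray conversions:

// Softmax over the logits of the first image, then print each class score:
var scores = tf.nn.softmax(datares2[0]).numpy();
for (int i = 0; i < class_names.Length; i++)
    Console.WriteLine($"{class_names[i]}: {(float)scores[i]}");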

Vitaliy Shakhlin
@vshakhlin
@Oceania2018 this screenshot demonstrates the difference: your current code on the left, my changes on the right.
[screenshot: image.png]
Haiping
@Oceania2018
@uzfm You can input multiple images at one time.
@vshakhlin You're correct. Do you mind submitting a PR with your fix?
Haiping
@Oceania2018
@vshakhlin The fix is working great.