Where communities thrive


  • Join over 1.5M+ people
  • Join over 100K+ communities
  • Free without limits
  • Create your own community
People
Repo info
Activity
  • May 22 2018 12:20
    mijicd unassigned #296
  • May 22 2018 12:20
    mijicd unassigned #295
  • May 22 2018 12:20
    mijicd unassigned #282
  • May 22 2018 12:20
    mijicd unassigned #259
  • May 22 2018 12:20
    mijicd unassigned #245
  • May 22 2018 12:20
    mijicd unassigned #244
  • May 22 2018 12:20
    mijicd unassigned #239
  • May 22 2018 12:20
    mijicd unassigned #237
  • May 22 2018 12:20
    mijicd unassigned #236
  • May 22 2018 12:20
    mijicd unassigned #216
  • May 22 2018 12:20
    mijicd unassigned #214
  • May 22 2018 12:20
    mijicd unassigned #195
  • May 22 2018 12:20
    mijicd unassigned #195
  • May 22 2018 12:20
    mijicd unassigned #195
  • May 22 2018 12:20
    mijicd unassigned #200
  • May 22 2018 12:20
    mijicd unassigned #197
  • May 22 2018 12:20
    mijicd unassigned #195
  • May 22 2018 12:20
    mijicd unassigned #198
  • May 22 2018 12:20
    mijicd unassigned #195
  • May 22 2018 12:20
    mijicd unassigned #194
Misbah Ulhaq
@mmulhaq_twitter
Any guidance will be helpful
Scenario is like this...
things --> Mainflux ---> External services ----> Mobile/Web application
Manuel Imperiale
@manuio
@mmulhaq_twitter You can create your own message consumer following writers example: https://github.com/mainflux/mainflux/tree/master/consumers
magixmin
@magixmin
@manuio that i have found that .but now send message still have error
connection is accepted
Manuel Imperiale
@manuio
@magixmin I can't understand without more details
magixmin
@magixmin

// Minimal Mainflux MQTT demo: connect as a thing, subscribe to the
// channel's message topic, publish a SenML record batch, then log the
// echoed message and disconnect.
var mqtt = require('mqtt')

var defaultConnectOptions = {
  keepalive: 60,
  // Mainflux authenticates MQTT clients by thing ID/key.
  clientId: '04978536-4637-4e4a-acf3-7ece01faf2fb',
  username: '04978536-4637-4e4a-acf3-7ece01faf2fb',
  password: '12f86e57-74c1-4888-b5e1-8dd7fda78116',
}

var channelId = '8651eea5-a2c6-4e02-b4a6-4c46877f2497'
// Mainflux expects messages on channels/<channel_id>/messages.
var topic = 'channels/' + channelId + '/messages'

var client = mqtt.connect('mqtt://iot.javodata.com', defaultConnectOptions)

client.on('connect', function () {
  client.subscribe(topic, { qos: 0 })

  // SenML record batch; base name ("bn") applies to following records.
  var domessage = [
    { bn: 'Dev1', n: 'temp', v: 40 },
    { n: 'hum', v: 40 },
    { bn: 'Dev2', n: 'temp', v: 40 },
    { n: 'hum', v: 40 },
  ]

  // BUG FIX: the payload must be the SenML JSON string. The original call
  //   client.publish(topic, 'WS connection demo!', domessage.toString())
  // sent a non-SenML payload (writers reject it with
  // "invalid character 'W' looking for beginning of value") and put
  // "[object Object],..." where mqtt.js expects the options object.
  client.publish(topic, JSON.stringify(domessage))
})

client.on('message', function (topic, message) {
  // message is a Buffer.
  console.log(message.toString())
  client.end()
})

mainflux-mqtt | {"level":"info","message":"Accepted new client","ts":"2021-01-25T10:50:16.883561817Z"}
mainflux-mqtt | {"level":"info","message":"Connect - client with ID: 04978536-4637-4e4a-acf3-7ece01faf2fb","ts":"2021-01-25T10:50:16.88661259Z"}
mainflux-twins | {"level":"warn","message":"Method save_states took 35.03µs to complete with error: failed to get twin id from redis cache : dial tcp 127.0.0.1:6379: connect: connection refused.","ts":"2021-01-25T10:50:16.901513058Z"}
mainflux-twins | {"level":"error","message":"State save failed: failed to get twin id from redis cache : dial tcp 127.0.0.1:6379: connect: connection refused","ts":"2021-01-25T10:50:16.901576306Z"}
mainflux-twins | {"level":"warn","message":"Failed to handle Mainflux message: failed to get twin id from redis cache : dial tcp 127.0.0.1:6379: connect: connection refused","ts":"2021-01-25T10:50:16.901589268Z"}
mainflux-mqtt | {"level":"info","message":"Subscribe - client ID: 04978536-4637-4e4a-acf3-7ece01faf2fb, to topics: channels/8651eea5-a2c6-4e02-b4a6-4c46877f2497/messages","ts":"2021-01-25T10:50:16.899834916Z"}
mainflux-mqtt | {"level":"info","message":"Publish - client ID 04978536-4637-4e4a-acf3-7ece01faf2fb to the topic: channels/8651eea5-a2c6-4e02-b4a6-4c46877f2497/messages","ts":"2021-01-25T10:50:16.901016852Z"}
mainflux-influxdb-writer | {"level":"warn","message":"Failed to handle Mainflux message: failed to decode senml : invalid character 'W' looking for beginning of value","ts":"2021-01-25T10:50:16.90430598Z"}
mainflux-mqtt | {"level":"info","message":"Disconnect - Client with ID: 04978536-4637-4e4a-acf3-7ece01faf2fb and username 04978536-4637-4e4a-acf3-7ece01faf2fb disconnected","ts":"2021-01-25T10:50:16.908110846Z"}
mainflux-mqtt | {"level":"warn","message":"Broken connection for client: 04978536-4637-4e4a-acf3-7ece01faf2fb with error: failed proxying from MQTT client to MQTT broker : read tcp 172.19.0.29:52568->172.19.0.8:1883: read: connection reset by peer","ts":"2021-01-25T10:50:16.908882398Z"}
mosquitto_pub -u 04978536-4637-4e4a-acf3-7ece01faf2fb -P 12f86e57-74c1-4888-b5e1-8dd7fda78116 -t channels/8651eea5-a2c6-4e02-b4a6-4c46877f2497/messages -h localhost -m '[{"bn":"Dev1","n":"temp","v":25}, {"n":"hum","v":35}, {"bn":"Dev2", "n":"temp","v":35}, {"n":"hum","v":35}]' is work .
Misbah Ulhaq
@mmulhaq_twitter
@manuio Thank you for the guidance, it seems all the consumers are dumping the data in postgres, cassandra, mongo, influx. Is there any use case in which the data is passed to any message queue like Kafka, ActiveMQ, RabbitMQ etc., or do I have to write my own version of a writer?
PricelessRabbit
@PricelessRabbit
Hi all. i'm having an issue with the mainflux agent and nats commands relay (im using the 0.11 tag version). the issue is that when the mqtt connection is lost and then agent reconnects to the server, it no longer relay service messages (commands coming from mainflux) into nats topic. if i restart the service then commands are relayed correctly. Is it a known bug?
PricelessRabbit
@PricelessRabbit
i reproduced the issue in my local env. i just opened an issue here mainflux/agent#48
Manuel Imperiale
@manuio
@mmulhaq_twitter This is one of the things that the mqtt-adapter can do, forward messages in a specific broker: https://github.com/mainflux/mainflux/blob/master/cmd/mqtt/main.go#L161
Manuel Imperiale
@manuio
In any case we have to document this better
@PricelessRabbit Thank you, we will check!
Dave
@rising_dark_gitlab

@manuio @manuio @manuio @manuio

@rising_dark_gitlab yes it is. Check here: https://github.com/mainflux/devops/

Hi, thanks for that. As I mentioned I've deployed using that, and my question is about how "tested", "stable" or "supported" these things are.
I ask because somes services (nginx, postgres) uses persistent volume claims, but other (redis-authn) use local storage.
Is this because the redis storage is simply a cache (transient) or because it is misconfigured?
I'm new to mainflux and kubernetes, I've been testing on a simple docker deployment, but now I'm trying to perform a kubernetes
deployment I've having issues with pods creating local storage that has to be deleted when migrating the pod.
:confused:

Dave
@rising_dark_gitlab
I'm starting to think the "mainflux-redis-auth-master" is generated automatically from the redis imported helm chart because it is referred to by a mainflux service.
So I should download the redis charts and see how they manage persistent storage.
Dave
@rising_dark_gitlab
I think I've finally put 2 and 2 together to make 5 or 6.. the "REDIS" chart enables persistence by default, but the mainflux charts define redis-auth.master.persistence.enabled=false
Am I meant to customize that for a deployment? Or is the data in redis truly transient?
Thanks, and my apologies for the noise.
Referring back to the mainflux manual, redis only exists to provide an event source via redis streams. There is no data stored in redis, so yes, it is completely transient.
Manuel Imperiale
@manuio
@rising_dark_gitlab check here: https://github.com/mainflux/devops/blob/master/charts/mainflux/templates/things-deployment.yaml#L28 and here: https://github.com/mainflux/mainflux/blob/master/cmd/things/main.go#L138
It's used as Things service auth cache. Redis is not only used for event sourcing.
Dave
@rising_dark_gitlab
@manuio That is correct, but
https://github.com/mainflux/devops/blob/master/charts/mainflux/values.yaml#L199
Clearly shows that "persistence" is disabled for "redis-auth". Should I enable persistence for redis-auth? or is it safe to delete/recreate (will it just force re-authentication?)
Dave
@rising_dark_gitlab
interestingly, redis-streams-master DOES have persistence enabled. I'm clearly confused about this. Is it something I need to worry about? I've force migrating via kubectl drain --delete-emptydir and things seem to keep working.
Manuel Imperiale
@manuio
@rising_dark_gitlab redis-auth recreates a mapping of ID/Key in RAM to accelerate future Identity requests. If you restart the service, it will be generated again without losing any information, since it is all persisted in the Things DB. But you can also use a persistent volume. It's up to you.
Dave
@rising_dark_gitlab
@manuio Thanks for clarifying. I'm happy not to use persistent volumes, I was just a little taken aback by the need for kubernetes to purge the data when migrating the pod to another node. All is clear now.
Manuel Imperiale
@manuio
@rising_dark_gitlab great! :+1:
magixmin
@magixmin
hey man, why does my grafana always show Network Error: Bad Gateway (502)?
but it worked before?
magixmin
@magixmin
Alexander Teplov
@teploff

Hello everyone!
Dear @drasko @nmarcetic give some advices please.
My question concerns dynamic configuration.
Are there some ways to get data flow from new things which was added after app connection with Mainflux had established?
In more detail:
1) create Mainflux user
2) under created user create things: "a", "b", "c" and application "app" and channels.
3) Things ("a", "b", "c") start to publish data , thing("app") start to subscribe, and it works well.
4) After that user forget to add thing "d", and create it.
So how to get data flow from thing "d" without restarting subscribe thing "app"?

Thanks

Manuel Imperiale
@manuio
@molodoj88 simply connect the new thing ("d") to the same channel and you will be able to get the data without restarting anything
Drasko DRASKOVIC
@drasko
@teploff I think you are referring to Writers, which have a hard-coded config file to connect to NATS
they are stateless in order to be clusterable, so we do not change their internal state
What you should do with your app is not to subscribe directly to NATS, but rather go via frontend protocol adapters (MQTT, WS, CoAP, HTTP, ...)
this way you can subscribe/unsubscribe dynamically
NATS is in internal network and is good for internal applications / DB writers
however when building outside applications on the top of Mainflux, better is to treat them like ordinary things and use their API keys for auth
Being in internal network, NATS is not protected by the auth
That being said - you can just use config file to subscribe to all NATS channels (*)
Alexander Teplov
@teploff
@manuio @drasko thank you so much!
Drasko DRASKOVIC
@drasko
:+1:
Jonathan Dreyer
@jonathandreyer
Hi everybody, I have developed a micro-service to forward message by HTTP. Currently, it is a version for the mainflux release v0.11.0 but I will update the code for the next release (v0.12.0). The source code is available here.
As discussed with @drasko in the PR #1158, I have migrated the PR into an extension of the mainflux platform. Don't hesitate to open issues if you have comments, ideas, etc.
Drasko DRASKOVIC
@drasko
Thanks @jonathandreyer !
I'll take a look
Jonathan Dreyer
@jonathandreyer
@drasko It is with pleasure.
I didn’t know if somebody has already tried to use in Go the upstream components, because I have some trouble to do that. For the next release of http-forwarder (v0.12.0), I try to use components which provide from "master" to anticipate the next release (and also create a CI which are tested the integration of those components into this extension).
The returned error is:
go: github.com/jonathandreyer/mainflux-httpforwarder/cmd/http-forwarder imports github.com/mainflux/mainflux/messaging/nats: package provided by github.com/mainflux/mainflux at latest version v0.11.0 but not at required version v0.11.1-0.20210209214404-f0f60e2d2a2c
Jonathan Dreyer
@jonathandreyer
Hi everybody, I have started testing the provision service with the release v0.11.0, but it returns the error:
mainflux-provision | {"level":"warn","message":"Method provision for token: <TOKEN> and things: [] took 54.290893ms to complete with error: failed to create bootstrap config : failed to create entity : 405 Method Not Allowed.","ts":"2021-02-21T21:38:20.369064276Z"} mainflux-bootstrap | {"level":"warn","message":"Method bootstrap for thing with external id <EXTERNAL_ID> took 4.531134ms to complete with error: non-existent entity.","ts":"2021-02-21T21:38:20.380319273Z"} Somebody has already had this error? For information, I have also tested with the master version and there is no error.
In reading the documentation, I have found something strange on the page for the provision service (here). The port of two first requests (8888 and 8091) seems wrong because in the .env it is indicated 8190. If you would like, I can create a PR to fix that in the doc repository.
When I have started to use the provision service (as an add-on) in following the documentation, the example does not work because an error related to certificate generation is returned. I have discovered that it is necessary to start also the certs service or disable his usage in .env by environment variable MF_PROVISION_X509_PROVISIONING. This issue has been generated by the #1221. To fix that, I propose to disable the usage of certs provisioning as it is indicated in the README.md of the provision service. @MF-Teams, what is your point of view?
I have also discovered two mistakes in the provision service, one in the docker-compose (environment variable MF_PROVISION_LOG_LEVEL is duplicated) and one in the README.md environment variables MF_PROVISION_HTTP_PORT, MF_PROVISION_SERVER_KEY, MF_PROVISION_PASS & MF_PROVISION_USER is duplicated). As for the documentation repository, I can create a PR to fix that.