var mqtt = require('mqtt')

// Connection options: the thing ID doubles as MQTT client ID and username,
// and the thing key is used as the password.
var defaultConnectOptions = {
  keepalive: 60,
  clientId: '04978536-4637-4e4a-acf3-7ece01faf2fb',
  username: '04978536-4637-4e4a-acf3-7ece01faf2fb',
  password: '12f86e57-74c1-4888-b5e1-8dd7fda78116'
}

var channelId = '8651eea5-a2c6-4e02-b4a6-4c46877f2497'
var topic = 'channels/' + channelId + '/messages'

var client = mqtt.connect('mqtt://iot.javodata.com', defaultConnectOptions)

client.on('connect', function () {
  client.subscribe(topic, { qos: 0 })

  // SenML payload: publish it as a JSON string. Array.prototype.toString()
  // would produce "[object Object],...", and the third publish() argument is
  // reserved for options, not for a second message.
  var domessage = [
    { bn: 'Dev1', n: 'temp', v: 40 },
    { n: 'hum', v: 40 },
    { bn: 'Dev2', n: 'temp', v: 40 },
    { n: 'hum', v: 40 }
  ]
  client.publish(topic, JSON.stringify(domessage))
})

client.on('message', function (topic, message) {
  // message is a Buffer; print it and close after the first message
  console.log(message.toString())
  client.end()
})
@manuio
@rising_dark_gitlab yes it is. Check here: https://github.com/mainflux/devops/
Hi, thanks for that. As I mentioned I've deployed using that, and my question is about how "tested", "stable" or "supported" these things are.
I ask because some services (nginx, postgres) use persistent volume claims, but others (redis-authn) use local storage.
Is this because the redis storage is simply a cache (transient), or because it is misconfigured?
I'm new to Mainflux and Kubernetes. I've been testing on a simple Docker deployment, but now that I'm trying to perform a Kubernetes
deployment I'm having issues with pods creating local storage that has to be deleted when migrating the pod.
:confused:
redis-auth recreates a mapping of ID/key in RAM to accelerate future identity requests. If you restart the service, the mapping will be regenerated without losing any information, since it is all persisted in the Things DB. But you can also use a persistent volume. It's up to you.
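To illustrate the cache-aside pattern described above, here is a minimal Node.js sketch. It is not the actual redis-auth implementation; it assumes the ioredis client and uses a hypothetical lookupThingKeyInDB() helper standing in for the Things DB query:

var Redis = require('ioredis')
var redis = new Redis() // assumes a local Redis instance on the default port

// Hypothetical stand-in for the persistent Things DB lookup.
function lookupThingKeyInDB (thingId) {
  return Promise.resolve('key-for-' + thingId)
}

// Cache-aside: try the in-memory cache first, fall back to the DB and repopulate.
// Losing the cache therefore only costs one extra DB round trip per identity.
async function identify (thingId) {
  var cached = await redis.get('thing:' + thingId)
  if (cached) return cached
  var key = await lookupThingKeyInDB(thingId)
  await redis.set('thing:' + thingId, key)
  return key
}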
Hello everyone!
Dear @drasko @nmarcetic, please give some advice.
My question concerns dynamic configuration.
Is there a way to get the data flow from new things that were added after the app's connection with Mainflux was established?
In more detail:
1) Create a Mainflux user.
2) Under that user, create things "a", "b", "c", an application thing "app", and channels.
3) Things "a", "b", "c" start to publish data, thing "app" starts to subscribe, and it works well.
4) After that, the user realizes they forgot to add thing "d", and creates it.
So how can "app" get the data flow from thing "d" without restarting its subscription? (See the sketch after this message.)
Thanks
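One way to approach this, sketched below under assumptions not stated above: if thing "d" is connected to a channel that "app" already subscribes to, its messages arrive without any change; if it publishes on a new channel, an mqtt.js client like the one at the top of this log can add a subscription at runtime without reconnecting. The channel ID below is a placeholder.

// Minimal sketch, assuming `client` is the connected mqtt.js client from the
// earlier example and that thing "d" publishes on a newly created channel.
var newChannelId = '<CHANNEL_ID_FOR_D>' // placeholder
var newTopic = 'channels/' + newChannelId + '/messages'

// Adding a subscription does not require restarting "app"; the existing
// 'message' handler also receives messages published on this topic.
client.subscribe(newTopic, { qos: 0 }, function (err) {
  if (!err) console.log('now also subscribed to ' + newTopic)
})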
go: github.com/jonathandreyer/mainflux-httpforwarder/cmd/http-forwarder imports
github.com/mainflux/mainflux/messaging/nats: package provided by github.com/mainflux/mainflux at latest version v0.11.0 but not at required version v0.11.1-0.20210209214404-f0f60e2d2a2c
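The message says the messaging/nats package exists at v0.11.0 but not at the required pseudo-version. One possible fix, offered as an assumption rather than a confirmed solution, is to pin the dependency back to the release that still provides the package (or, alternatively, update the import path to wherever the package moved in the newer commit):

go get github.com/mainflux/mainflux@v0.11.0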
mainflux-provision | {"level":"warn","message":"Method provision for token: <TOKEN> and things: [] took 54.290893ms to complete with error: failed to create bootstrap config : failed to create entity : 405 Method Not Allowed.","ts":"2021-02-21T21:38:20.369064276Z"}
mainflux-bootstrap | {"level":"warn","message":"Method bootstrap for thing with external id <EXTERNAL_ID> took 4.531134ms to complete with error: non-existent entity.","ts":"2021-02-21T21:38:20.380319273Z"}
Has somebody already had this error? For information, I have also tested with the master version and there is no error.
8888 and 8091) seems wrong because the .env indicates 8190. If you would like, I can create a PR to fix that in the doc repository.
.env by the environment variable MF_PROVISION_X509_PROVISIONING. This issue was introduced by #1221. To fix it, I propose disabling the usage of certs provisioning, as indicated in the README.md of the provision service. @MF-Teams, what is your point of view?
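As a concrete illustration of that proposal (the value shown is an assumption; only the variable name appears above), disabling certs provisioning would be a one-line change in the .env:

MF_PROVISION_X509_PROVISIONING=false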
MF_PROVISION_LOG_LEVEL is duplicated) and one in the README.md (the environment variables MF_PROVISION_HTTP_PORT, MF_PROVISION_SERVER_KEY, MF_PROVISION_PASS & MF_PROVISION_USER are duplicated). As for the documentation repository, I can create a PR to fix that.