/* nuts-data.js */

const fs = require('fs');
const csv = require('csv-parse');
const stringify = require('csv-stringify');

/* Helper method to load the dataset names from CSV and store them in the server object */
module.exports.loadDatasets = async function (filePath) {
  //console.log('Datasets structure loading.')
  let datasets = undefined
  return new Promise((resolve, reject) => {
    const stream = fs.createReadStream(filePath).pipe(csv({ to_line: 1 }))
    stream
      .on('data', (row) => {
        if (!datasets) {
          datasets = row
        }
      })
      .on('end', () => {
        datasets = datasets.slice(2) //TODO: FIXME: unify this number (only one ID column in the beginning)
        console.log('Datasets structure loaded.')
        resolve(datasets)
      })
      .on('error', reject)
  })
}
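/*
 * Usage sketch (illustrative, not part of the module): the loaders are meant to be
 * called once at server startup and their results cached on a server object.
 * The file path and the `server` object below are hypothetical placeholders.
 *
 *   const nutsData = require('./nuts-data');
 *   nutsData.loadDatasets('./data/attractiveness.csv')
 *     .then(datasets => { server.datasets = datasets; })
 *     .catch(console.error);
 */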
/* Load all the attractiveness data from CSV into a server object,
   so the data doesn't have to be reloaded for each request. */
module.exports.loadRuralData = async function (filePath) {
  //console.log('Reading rural data file processing started.')
  let ruralData = []
  let columns
  return new Promise((resolve, reject) => {
    fs.createReadStream(filePath)
      .pipe(csv())
      .on('data', (row) => {
        if (!columns) {
          columns = row
          return
        }
        let item = {
          values: {}
        }
        for (let i = 0; i < columns.length; i++) {
          let colName = columns[i].toLowerCase()
          if (colName == "nuts_id") // ID of the NUTS region (EU)
            item.nuts = row[i]
          // else if (colName == "datasets") // empty datasets count
          //   item.availableDS = datasets.length - row[i];
          // else if (colName == "quality")
          //   item.quality = row[i];
          else if (colName == "lau2") // ID of the municipality (CZ)
            item.lau2 = row[i]
          else if (colName == "district_code") // ID of the district (Kenya+Uganda)
            item.district = row[i]
          else if (colName == "eurostat_code" || colName == "name")
            continue
          else {
            item.values[colName] = Number(row[i])
          }
        }
        ruralData.push(item)
      })
      .on('end', () => {
        console.log('Rural data file processing finished.');
        resolve(ruralData);
      })
      .on('error', reject);
  })
}
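/*
 * Shape of each item produced by loadRuralData (illustrative values; the actual
 * value columns depend on the input CSV):
 *
 *   {
 *     nuts: 'CZ010',              // or lau2 / district, depending on the file
 *     values: { broadband: 0.82, unemployment: 2.1 }
 *   }
 */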
module.exports.loadOntology = async function (filePath) {
  return new Promise((resolve, reject) => {
    fs.readFile(filePath, (err, data) => {
      if (err) return reject(err)
      const ontology = JSON.parse(data)
      resolve(ontology)
    })
  })
}
/**
 * Resolves with an array representing the rows of a CSV file
 * @param {string} inputFileName path to the CSV file with input data for the clustering calculation
 */
module.exports.loadClusteringInput = async function (inputFileName) {
  const clusteringData = [];
  /*
   * The parsed CSV array keeps the native csv-parse structure
   * for easier serialization back to a CSV file later
   */
  return new Promise((resolve, reject) => {
    fs.createReadStream(inputFileName)
      .pipe(csv())
      .on('data', (row) => {
        clusteringData.push(row);
      })
      .on('end', () => {
        resolve(clusteringData);
      })
      .on('error', reject);
  });
}
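/*
 * The resolved value is an array of rows in the native csv-parse layout,
 * header row first (illustrative content; column names are made up):
 *
 *   [
 *     ['NUTS_ID', 'broadband', 'unemployment'],
 *     ['CZ010', '0.82', '2.1'],
 *     ...
 *   ]
 */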
/**
 * Resolves once the modified CSV file is written to the filesystem
 */
module.exports.modifyClusteringData = async function ({ datasets, data, params, idString, outputFileName }) {
  // the regional ID column must be copied to the output as well
  const allowedDatasets = [idString, ...params.datasets.map(ds => ds.id)]
  const factorMultipliers = data[0].map((dataset) => {
    if (dataset === idString) return 1
    if (!allowedDatasets.includes(dataset)) {
      return 0
    } else {
      return params.datasets.find(ds => ds.id === dataset).weight
    }
  })
  /* The actual modification logic resides here */
  const modifiedData = data.map((row, idx) => {
    return row.map((value, i) => {
      if (idx == 0) {
        /* These are the headers */
        /* Have to check for both allowed datasets and zero multipliers */
        return allowedDatasets.includes(value) && factorMultipliers[i] !== 0 ? value : null;
      } else if (isNaN(value)) {
        /* This is the NUTS ID record at the beginning of each line */
        return value;
      }
      return factorMultipliers[i] === 0 ? null : value * factorMultipliers[i];
    }).filter(val => val !== null);
  });
  //console.log(modifiedData);
  if (modifiedData[0].length <= 1) {
    throw new Error('All datasets turned off. No data to create clusters.');
  }
  return new Promise((resolve, reject) => {
    stringify(modifiedData, (err, output) => {
      if (err) return reject(err);
      fs.writeFile(outputFileName, output, (err) => {
        if (err) return reject(err);
        console.log('Data modification finished.');
        resolve();
      })
    })
  });
}
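/*
 * Example call (a sketch; the dataset ids, weights and file names are made up):
 *
 *   const data = await module.exports.loadClusteringInput('in_file.csv');
 *   await module.exports.modifyClusteringData({
 *     data,
 *     params: { datasets: [{ id: 'broadband', weight: 2 }] },
 *     idString: 'NUTS_ID',
 *     outputFileName: 'modified_in_file.csv'
 *   });
 *
 * Header columns not listed in params.datasets (or given weight 0) are dropped,
 * and the remaining values are multiplied by their weight before being written.
 */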
/**
 * Reads the out_file.csv created by the R script and passes the parsed rows
 * to the callback as an array of objects
 */
module.exports.loadClusters = function (filePath, idString, dataLoadedCallback) {
  //console.log('Reading clustering data file processing started.');
  let clusters = [];
  let columns = undefined;
  fs.createReadStream(filePath)
    .pipe(csv())
    .on('data', (row) => {
      if (!columns) {
        columns = row;
      }
      else {
        let item = {};
        for (let i = 0; i < columns.length; i++) {
          const colName = columns[i].length > 0 ? columns[i].toLowerCase() : idString;
          item[colName] = row[i];
        }
        clusters.push(item);
      }
    })
    .on('end', () => {
      console.log('Cluster data file processing finished.');
      dataLoadedCallback(clusters);
    });
}
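/*
 * Illustrative result passed to dataLoadedCallback (the non-ID column names
 * come from the R script output; the values below are made up):
 *
 *   [ { nuts_id: 'CZ010', cluster: '3' }, { nuts_id: 'CZ020', cluster: '1' } ]
 */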
module.exports.getFactorIndex = function (region, factor) {
  //console.log('getFactorIndex');
  //console.log('region: ' + JSON.stringify(region, null, 4));
  //console.log('factor: ' + JSON.stringify(factor, null, 4));
  let sumValue = 0;
  let count = 0;
  factor.datasets.forEach(ds => {
    // dataset ids are IRIs; keep only the last path segment as the value key
    const dataset = ds.split('/').slice(-1).pop()
    //console.log('factor: ' + factor.factor);
    const value = region.values[dataset];
    if (value) {
      sumValue += value;
      count++;
    }
  });
  return { index: sumValue / count, sumValue: sumValue, sumWeight: count * factor.weight };
}
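/*
 * Worked example (made-up numbers and IRIs): for
 *   factor = { datasets: ['http://example.org/broadband', 'http://example.org/unemployment'], weight: 2 }
 *   region = { values: { broadband: 4, unemployment: 6 } }
 * the result is { index: 5, sumValue: 10, sumWeight: 4 }.
 */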
/* Unused */
function getDatasetFactor(datasets, colName) {
  for (let i = 0; i < datasets.length; i++) {
    if (datasets[i].Name.toLowerCase() == colName)
      return datasets[i].Factor;
  }
  return undefined;
}