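// Storage: the local TSDB is wrapped in a readyStorage so reads are only
// served once the database has actually started; remote-write storage sits
// alongside it, and fanoutStorage combines the two so every append reaches
// both local and remote storage.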
var (
    localStorage  = &readyStorage{stats: tsdb.NewDBStats()}
    scraper       = &readyScrapeManager{}
    remoteStorage = remote.NewStorage(logger.With("component", "remote"), prometheus.DefaultRegisterer, localStorage.StartTime, localStoragePath, time.Duration(cfg.RemoteFlushDeadline), scraper)
    fanoutStorage = storage.NewFanout(logger, localStorage, remoteStorage)
)
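
// Per-subsystem contexts (ctxRule is deliberately not cancellable), the
// notifier manager that forwards alerts to Alertmanager, and the two
// discovery managers that are populated below.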
var (
    ctxWeb, cancelWeb = context.WithCancel(context.Background())
    ctxRule           = context.Background()

    notifierManager = notifier.NewManager(&cfg.notifier, logger.With("component", "notifier"))

    ctxScrape, cancelScrape = context.WithCancel(context.Background())
    ctxNotify, cancelNotify = context.WithCancel(context.Background())
    discoveryManagerScrape  *discovery.Manager
    discoveryManagerNotify  *discovery.Manager
)
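
// Kubernetes client metrics are registered once here, ahead of the discovery
// managers below, so that any Kubernetes service discovery they instantiate
// finds the metrics already in place.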
err = discovery.RegisterK8sClientMetricsWithPrometheus(prometheus.DefaultRegisterer)
if err != nil {
    logger.Error("failed to register Kubernetes client metrics", "err", err)
    os.Exit(1)
}
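
// Service discovery metrics are created once and shared by both discovery
// managers.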
sdMetrics, err := discovery.CreateAndRegisterSDMetrics(prometheus.DefaultRegisterer)
if err != nil {
    logger.Error("failed to register service discovery metrics", "err", err)
    os.Exit(1)
}
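
// Two discovery managers: one feeds discovered targets to the scrape
// manager, the other feeds Alertmanager endpoints to the notifier.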
discoveryManagerScrape = discovery.NewManager(ctxScrape, logger.With("component", "discovery manager scrape"), prometheus.DefaultRegisterer, sdMetrics, discovery.Name("scrape"))
if discoveryManagerScrape == nil {
    logger.Error("failed to create a discovery manager scrape")
    os.Exit(1)
}

discoveryManagerNotify = discovery.NewManager(ctxNotify, logger.With("component", "discovery manager notify"), prometheus.DefaultRegisterer, sdMetrics, discovery.Name("notify"))
if discoveryManagerNotify == nil {
    logger.Error("failed to create a discovery manager notify")
    os.Exit(1)
}
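
// The scrape manager appends all scraped samples to the fanout storage, so
// they reach both the local TSDB and any configured remote-write endpoints.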
scrapeManager, err := scrape.NewManager(
    &cfg.scrape,
    logger.With("component", "scrape manager"),
    logging.NewJSONFileLogger,
    fanoutStorage,
    prometheus.DefaultRegisterer,
)
if err != nil {
    logger.Error("failed to create a scrape manager", "err", err)
    os.Exit(1)
}
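
// Tracing is always set up. The PromQL engine and rule manager are declared
// here but remain nil in agent mode, which neither serves queries nor
// evaluates rules.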
var (
    tracingManager = tracing.NewManager(logger)

    queryEngine *promql.Engine
    ruleManager *rules.Manager
)
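
// Server mode only: build the PromQL engine. The @ modifier and negative
// offsets are enabled unconditionally; sample limits, timeouts, and the
// lookback delta come from the command-line configuration.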
if !agentMode {
    opts := promql.EngineOpts{
        Logger:                   logger.With("component", "query engine"),
        Reg:                      prometheus.DefaultRegisterer,
        MaxSamples:               cfg.queryMaxSamples,
        Timeout:                  time.Duration(cfg.queryTimeout),
        ActiveQueryTracker:       promql.NewActiveQueryTracker(localStoragePath, cfg.queryConcurrency, logger.With("component", "activeQueryTracker")),
        LookbackDelta:            time.Duration(cfg.lookbackDelta),
        NoStepSubqueryIntervalFn: noStepSubqueryInterval.Get,
        EnableAtModifier:         true,
        EnableNegativeOffset:     true,
        EnablePerStepStats:       cfg.enablePerStepStats,
        EnableDelayedNameRemoval: cfg.promqlEnableDelayedNameRemoval,
    }

    queryEngine = promql.NewEngine(opts)
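
    // The rule manager evaluates rules through the query engine against the
    // fanout storage, appends results back to it, and hands fired alerts to
    // the notifier manager for delivery.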
    ruleManager = rules.NewManager(&rules.ManagerOptions{
        Appendable:             fanoutStorage,
        Queryable:              localStorage,
        QueryFunc:              rules.EngineQueryFunc(queryEngine, fanoutStorage),
        NotifyFunc:             rules.SendAlerts(notifierManager, cfg.web.ExternalURL.String()),
        Context:                ctxRule,
        ExternalURL:            cfg.web.ExternalURL,
        Registerer:             prometheus.DefaultRegisterer,
        Logger:                 logger.With("component", "rule manager"),
        OutageTolerance:        time.Duration(cfg.outageTolerance),
        ForGracePeriod:         time.Duration(cfg.forGracePeriod),
        ResendDelay:            time.Duration(cfg.resendDelay),
        MaxConcurrentEvals:     cfg.maxConcurrentEvals,
        ConcurrentEvalsEnabled: cfg.enableConcurrentRuleEval,
        DefaultRuleQueryOffset: func() time.Duration {
            return time.Duration(cfgFile.GlobalConfig.RuleQueryOffset)
        },
    })
}