@unpublished{Burning2021, author = {Anthony Bonato and Shahin Kamali}, title = {An improved bound on the burning number of graphs.}, journal = {CoRR}, volume = {abs/2110.01087}, year = {2021}, url = {https://arxiv.org/abs/2110.01087}, eprinttype = {arXiv}, abstract = {The burning number conjecture states that the burning number of a connected graph is at most $\lceil \sqrt{n} \rceil.$ While the conjecture is unresolved, Land and Lu proved that the burning number of a connected graph is at most $ \sqrt{(3/2)n}+O(1).$ Using an algorithmic approach, we provide an improved upper bound for the burning number of a connected graph: $$\bigg \lceil \frac{\sqrt{12n+64}+8}{3} \bigg \rceil = \sqrt{(4/3)n} +O(1)$$. } } @unpublished{SequencingAdvice21, author = {Spyros Angelopoulos and Diogo Ars{\'{e}}nio and Shahin Kamali}, title = {Competitive Sequencing with Noisy Advice}, journal = {CoRR}, volume = {abs/2111.05281}, year = {2021}, arxiv = {https://arxiv.org/abs/2111.05281}, eprinttype = {arXiv}, abstract = {Several well-studied online resource allocation problems can be formulated in terms of infinite, increasing sequences of positive values, in which each element is associated with a corresponding allocation value. Examples include problems such as online bidding, searching for a hidden target on an unbounded line, and designing interruptible algorithms based on contract scheduling. The performance of the online algorithm, in each of these problems, is measured by the competitive ratio, which describes the multiplicative performance loss due to the absence of full information on the instance. We study such competitive sequencing problems in a setting in which the online algorithm has some (potentially) erroneous information, expressed as a $k$-bit advice string, for some given $k$. For concreteness, we follow the formulation of contract scheduling as case study. 
We first consider the {\em untrusted} advice setting of [Angelopoulos et al, ITCS 2020], in which the objective is to obtain a Pareto-optimal schedule, considering the two extremes: either the advice is either error-free, or it is generated by a (malicious) adversary. Here, we show a Pareto-optimal schedule, using a new approach for applying the functional-based lower-bound technique due to [Gal, Israel J. Math. 1972]. Next, we study a nascent {\em noisy} advice setting, in which a number of the advice bits may be erroneous; the exact error is unknown to the online algorithm, which only has access to a pessimistic estimate (i.e., an upper bound on this error). We give improved upper bounds, but also the first lower bound on the competitive ratio of an online problem in this setting. To this end, we combine ideas from robust query-based search in arrays, and fault-tolerant contract scheduling. Last, we demonstrate how to apply the above techniques in robust optimization without predictions, and discuss how they can be applicable in the context of more general online problems. } } @article{DurrKamali21, author = {Christoph D{\"{u}}rr and Shahin Kamali}, title = {Randomized Two-valued Bounded Delay Online Buffer Management}, journal = {Oper. Res. Lett.}, volume = {49}, number = {2}, pages = {246--249}, year = {2021}, arxiv = {https://arxiv.org/abs/2004.14715}, abstract = {In the bounded delay buffer management problem unit size packets arrive online to be sent over a network link. The objective is to maximize the total weight of packets sent before their deadline. In this paper we are interested in the two-valued variant of the problem, where every packet has either low ($1$) or high priority weight ($\alpha>1$). 
We show that the optimal randomized competitive ratio against an oblivious adversary is $1+(\alpha-1)/(\alpha^2+\alpha)$.} } @unpublished{BpPredictions, author = {Spyros Angelopoulos and Shahin Kamali and Kimia Shadkami}, title = {Online Bin Packing with Predictions}, journal = {CoRR}, volume = {abs/2102.03311}, year = {2021}, arxiv = {https://arxiv.org/abs/2102.03311}, pages = {1-22}, github = {https://github.com/shahink84/BinPackingPredictions}, abstract = {Bin packing is a classic optimization problem with a wide range of applications from load balancing in networks to supply chain management. In this work we study the online variant of the problem, in which a sequence of items of various sizes must be placed into a minimum number of bins of uniform capacity. The online algorithm is enhanced with a (potentially erroneous) prediction concerning the frequency of item sizes in the sequence. We design and analyze online algorithms with efficient tradeoffs between their consistency (i.e., the competitive ratio assuming no prediction error) and their robustness (i.e., the competitive ratio under adversarial error), and whose performance degrades gently as a function of the error. Previous work on this problem has only addressed the extreme cases with respect to the prediction error, and has relied on overly powerful and error-free prediction oracles. } } @article{ACMTCH21, author = {Md Momin Al Aziz and Shahin Kamali and Noman Mohammed and Xiaoqian Jiang}, title = {Online Algorithm for Differentially Private Genome-wide Association Studies}, journal = {{ACM} Trans. Comput. Heal.}, volume = {2}, number = {2}, pages = {13:1--13:27}, year = {2021}, github = {https://github.com/mominbuet/DifferentialPrivacyGWAS}, abstract = {Digitization of healthcare records contributed to a large volume of functional scientific data that can help researchers to understand the behaviour of many diseases. 
However, the privacy implications of this data, particularly genomics data, have surfaced recently as the collection, dissemination, and analysis of human genomics data is highly sensitive. There have been multiple privacy attacks relying on the uniqueness of the human genome that reveals a participant or a certain group’s presence in a dataset. Therefore, the current data sharing policies have ruled out any public dissemination and adopted precautionary measures prior to genomics data release, which hinders timely scientific innovation. In this article, we investigate an approach that only releases the statistics from genomic data rather than the whole dataset and propose a generalized Differentially Private mechanism for Genome-wide Association Studies (GWAS). Our method provides a quantifiable privacy guarantee that adds noise to the intermediate outputs but ensures satisfactory accuracy of the private results. Furthermore, the proposed method offers multiple adjustable parameters that the data owners can set based on the optimal privacy requirements. These variables are presented as equalizers that balance between the privacy and utility of the GWAS. The method also incorporates Online Bin Packing technique, which further bounds the privacy loss linearly, growing according to the number of open bins and scales with the incoming queries. Finally, we implemented and benchmarked our approach using seven different GWAS studies to test the performance of the proposed methods. The experimental results demonstrate that for 1,000 arbitrary online queries, our algorithms are more than 80% accurate with reasonable privacy loss and exceed the state-of-the-art approaches on multiple studies (i.e., EigenStrat, LMM, TDT).} } @inproceedings{Algocloud21, author = {Shahin Kamali and Pooya Nikbakht}, title = {On the Fault-Tolerant Online Bin Packing Problem}, arxiv = {https://arxiv.org/abs/2107.02922}, booktitle = {Proc. 
International Conference on Algorithmic Aspects of Cloud Computing ({ALGOCLOUD})}, pages = {1-17}, year = {2021}, abstract = {We study the fault-tolerant variant of the online bin packing problem. Similar to the classic bin packing problem, an online sequence of items of various sizes should be packed into a minimum number of bins of uniform capacity. For applications such as server consolidation, where bins represent servers and items represent jobs of various loads, it is necessary to maintain fault-tolerant solutions. In a fault-tolerant packing, any job is replicated into $f+1$ servers, for some integer $f>1$, so that the failure of up to $f$ servers does not interrupt service. We build over a practical model introduced by Li and Tang [SPAA 2017] in which each job of load $x$ has a primary replica of load $x$ and $f$ standby replicas, each of load $x/\eta$, where $\eta >1$ is a parameter of the problem. Upon failure of up to $f$ servers, any primary replica in a failed bin should be replaced by one of its standby replicas so that the extra load of the new primary replica does not cause an overflow in its bin. We study a general setting in which bins might fail while the input is still being revealed. Our main contribution is an algorithm, named \Alg, which maintains fault-tolerant packings under this general setting. We prove that \Alg has an asymptotic competitive ratio of at most 1.75. This is an improvement over the best existing asymptotic competitive ratio 2 of an algorithm by Li and Tang [TPDS 2020], which works under a model that assumes bins fail only after all items are packed.} } @article{algorithmica21, author = {Joan Boyar and Lene M. Favrholdt and Shahin Kamali and Kim S. 
Larsen}, title = {Online Bin Covering with Advice}, arxiv = {https://arxiv.org/abs/1905.00066}, journal = {Algorithmica}, volume = {83}, number = {3}, pages = {795--821}, year = {2021}, abstract = { The bin covering problem asks for covering a maximum number of bins with an online sequence of $n$ items of different sizes in the range $(0,1]$; a bin is said to be covered if it receives items of total size at least~$1$. We study this problem in the advice setting and provide asymptotically tight bounds of $\Theta(n \log \opt)$ on the size of advice required to achieve optimal solutions. Moreover, we show that any algorithm with advice of size $o(\log \log n)$ has a competitive ratio of at most~$0.5$. In other words, advice of size $o(\log \log n)$ is useless for improving the competitive ratio of~$0.5$, attainable by an online algorithm without advice. This result highlights a difference between the bin covering and the bin packing problems in the advice model: for the bin packing problem, there are several algorithms with advice of constant size that outperform online algorithms without advice. Furthermore, we show that advice of size $O(\log \log n)$ is sufficient to achieve a competitive ratio that is arbitrarily close to~$0.53\bar{3}$ and hence strictly better than the best ratio~$0.5$ attainable by purely online algorithms. The technicalities involved in introducing and analyzing this algorithm are quite different from the existing results for the bin packing problem and confirm the different nature of these two problems. Finally, we show that a linear number of advice bits is necessary to achieve any competitive ratio better than $15/16$ for the online bin covering problem. } } @inproceedings{AAAI2022, author = {Spyros Angelopoulos and Shahin Kamali and Dehou Zhang}, title = {Online Search With Best-Price and Query-Based Predictions}, booktitle = {Proc. 
36th AAAI Conference on Artificial Intelligence (AAAI)}, pages = {tbd}, year = {2022}, arxiv = {https://arxiv.org/abs/2112.01592}, github = {https://github.com/DehouZhang/Online-Search-with-Predictions}, githubtwo={https://github.com/shahink84/OnlineSearchRBIS}, abstract = {In the online (time-series) search problem, a player is presented with a sequence of prices which are revealed in an online manner. In the standard definition of the problem, for each revealed price, the player must decide irrevocably whether to accept or reject it, without knowledge of future prices (other than an upper and a lower bound on their extreme values), and the objective is to minimize the competitive ratio, namely the worst case ratio between the maximum price in the sequence and the one selected by the player. The problem formulates several applications of decision-making in the face of uncertainty on the revealed samples. Previous work on this problem has largely assumed extreme scenarios in which either the player has almost no information about the input, or the player is provided with some powerful, and error-free advice. In this work, we study learning-augmented algorithms, in which there is a potentially erroneous prediction concerning the input. Specifically, we consider two different settings: the setting in which the prediction is related to the maximum price in the sequence, as well as the setting in which the prediction is obtained as a response to a number of binary queries. For both settings, we provide tight, or near-tight upper and lower bounds on the worst-case performance of search algorithms as a function of the prediction error. We also provide experimental results on data obtained from stock exchange markets that confirm the theoretical analysis, and explain how our techniques can be applicable to other learning-augmented applications.} } @article{SantosVTTK19, author = {Saulo dos Santos and Muskan Vinayak and Ruppa K. 
Thulasiram and Parimala Thulasiraman and Shahin Kamali}, title = {Validating pairwise transactions on cryptocurrencies: a novel heuristics and network simulation}, journal = {J. Bank. Financial Technol.}, volume = {3}, number = {1}, pages = {71--81}, year = {2019}, abstract = {Bitcoin, along with other cryptocurrencies, has received a lot of attention in the recent past. The popularity of Bitcoin has increased the volume of transactions in an unprecedented way. The time to complete a simple pairwise transaction is determined by proof-of-work which requires a significant time compared to other components of the Bitcoin protocol. In this study, we propose a heuristic for validating pairwise transactions on cryptocurrencies. Our heuristic is based on simulating the participants sending and receiving transactions. We use SHA256 algorithm to enhance our solution for pairwise transactions, creating a local Blockchain of transactions, which has been previously used in the development of various Blockchain systems. We tested in-file and in-memory configurations in our simulations with two million transactions, which respectively took 290.39 and 5.34 s. The peak number for transactions-per-second was 6.88 when using the in-file setting and 374.25 for in-memory setting. From these experiments, we conclude that the number of transactions processed per second improves by increasing the block size as well as avoiding file access. We also implemented a parallel version of our algorithm in order to simulate the sharding technique and ultimately achieve further improvements in performance. In addition, we used Bitcoin simulator to analyze the impact of increasing the block size on the number of forks. 
Our simulations show that using a secondary relay network, such as FIBRE, to propagate the new blocks significantly reduces the number of forks and consequently the number of stale blocks.} } @inproceedings{APOCS2021, title = {Beyond Worst-case Analysis of Multicore Caching Strategies}, booktitle = {Proc. {SIAM-ACM} Symposium on Algorithmic Principles of Computer Systems ({APoCS})}, author = {Shahin Kamali and Helen Xu}, pages = {1-15}, year = {2021}, arxiv = {https://arxiv.org/abs/2011.02046}, abstract = {Every processor with multiple cores sharing a cache needs to implement a cache-replacement algorithm. Previous work demonstrated that the competitive ratio of a large class of online algorithms, including Least-Recently-Used (LRU), grows with the length of the input. Furthermore, even offline algorithms like Furthest-In-Future, the optimal algorithm in single-core caching, cannot compete in the multicore setting. These negative results motivate a more in-depth comparison of multicore caching algorithms via alternative analysis measures. Specifically, the power of the adversary to adapt to online algorithms suggests the need for a direct comparison of online algorithms to each other. In this paper, we introduce cyclic analysis, a generalization of bijective analysis introduced by Angelopoulos and Schweitzer [JACM'13]. Cyclic analysis captures the advantages of bijective analysis while offering flexibility that makes it more useful for comparing algorithms for a variety online problems. In particular, we take the first steps beyond worst-case analysis for analysis of multicore caching algorithms. We use cyclic analysis to establish relationships between multicore caching algorithms, including the advantage of LRU over all other multicore caching algorithms in the presence of locality of reference.} } @inproceedings{AAAI2021, author = {Spyros Angelopoulos and Shahin Kamali}, title = {Contract Scheduling With Predictions}, booktitle = {Proc. 
35th AAAI Conference on Artificial Intelligence (AAAI)}, pages = {11726--11733}, year = {2021}, arxiv = {https://arxiv.org/abs/2011.12439}, github = {https://github.com/shahink84/ContractSchdulingWithPredictions}, abstract = {Contract scheduling is a general technique that allows to design a system with interruptible capabilities, given an algorithm that is not necessarily interruptible. Previous work on this topic has largely assumed that the interruption is a worst-case deadline that is unknown to the scheduler. In this work, we study the setting in which there is a potentially erroneous {\em prediction} concerning the interruption. Specifically, we consider the setting in which the prediction describes the time that the interruption occurs, as well as the setting in which the prediction is obtained as a response to a single or multiple binary queries. For both settings, we investigate tradeoffs between the robustness (i.e., the worst-case performance assuming adversarial prediction) and the consistency (i.e., the performance assuming that the prediction is error-free), both from the side of positive and negative results.} } @inproceedings{COCOA20, title={Cutting Stock with Rotation: Packing Square Items into Square Bins}, author={Shahin Kamali and Pooya Nikbakht}, booktitle={Proc. 14th International Conference on Combinatorial Optimization and Applications (COCOA)}, year={2020}, pages = {530--544}, url = {https://link.springer.com/chapter/10.1007/978-3-030-64843-5_36}, abstract = {In the square packing problem, the goal is to place a multi-set of square-items of various sizes into a minimum number of square-bins of equal size. Items are assumed to have various side-lengths of at most 1, and bins have uniform side-length 1. Despite being studied previously, the existing models for the problem do not allow rotation of items. In this paper, we consider the square packing problem in the presence of rotation. As expected, we can show that the problem is NP-hard. 
We study the problem under a resource augmented setting where an approximation algorithm can use bins of size $1 + \alpha$, for some $\alpha > 0$ , while the algorithm’s packing is compared to an optimal packing into bins of size 1. Under this setting, we show that the problem admits an asymptotic polynomial time scheme (APTAS) whose solutions can be encoded in a poly-logarithmic number of bits.} } @inproceedings{BLOCKCHAIN20, author = {Saulo dos Santos and Shahin Kamali and Ruppa K. Thulasiram}, title = {Candidate Set Formation Policy for Mining Pool}, booktitle = {Proc. IEEE Conference on Blockchain (Blockchain'20)}, url = {https://ieeexplore.ieee.org/document/9284733}, pages = {415-420}, year = {2020}, abstract = {Cryptocurrencies like Bitcoin use the blockchain technology to record transactions in a distributed and secure way. Miners are distributed entities responsible for validating the transactions in the network and producing and verifying new blocks of confirmed transactions. In addition, they are responsible for keeping the protocol's integrity, protecting it against the double-spending problem. Miners are rewarded when they add new blocks to the blockchain. The reward consists of fresh coins created after adding a new block as well as the fees collected from the transactions inside the added block. The amount of new coins generated with new blocks is diminishing by the protocol over time. As such, the significance of collected fees is increasing for the miners.A recent trend in large mining pools is to allow miners to select transactions in the block they aim to mine. Allowing miners to select transactions increases transparency via discretization and also helps to avoid conflict of interest with the mining pool managers. Assuming that forming blocks is in miners' hands, miners should have a strategy to maintain transactions inside the block in a way to maximize their collected fees. The mining process is a random process that is "progress free". 
That is, a miner can update the transactions inside the block without any impact on its chance of succeeding in adding the block. Given that transactions with higher fees might arrive at any time in an online manner, it is profitable for a miner to "refresh" its block during the mining process. In this paper, we study the impact of refreshing blocks via an experimental analysis on real-world data from Bitcoin on a controlled environment that is carefully tuned to match the real world. Our results indicate that refreshing blocks is essential for increasing miners' collected fees, and overlooking it will have a significant impact on miners' revenues.} } @inproceedings{CCCG20, title={Non-crossing matching of online points}, author={Prosenjit Bose and Paz Carmi and St{\'{e}}phane Durocher and Shahin Kamali and Arezoo Sajadpour}, booktitle={Proc. 32nd Canadian Conf. on Computational Geometry (CCCG)}, year={2020}, pages = {233--239}, url = {https://vga.usask.ca/cccg2020/papers/Non-Crossing%20Matching%20of%20Online%20Points.pdf}, abstract = {We consider the non-crossing matching problem in the online setting. In the monochromatic setting, a sequence of points in general position in the plane is revealed in an online manner, and the goal is to create a maximum matching of these points such that the line segments connecting pairs of matched points do not cross. The problem is online in the sense that the decisions to match each arriving point are irrevocable and should be taken without prior knowledge about forthcoming points. The bichromatic setting is defined similarly, except that half of the points are red and the rest are blue, and each matched pair consists of one red point and one blue point. Inspired by the online bipartite matching problem~\cite{KarpVV90}, where vertices on one side of a bipartite graph appear in an online manner, we assume red points are given a priori and blue points arrive in an online manner. 
In the offline setting, both the monochromatic and bichromatic problems can be solved optimally with all pairs matched. In the online setting of the monochromatic version, we show that a greedy family of algorithms matches $2\lceil (n-1)/3\rceil$ points, where $n$ is the number of input points. Meanwhile, we prove that no deterministic online algorithm can match more than $2\lceil (n-1)/3 \rceil$ points, i.e., the greedy strategy is optimal. In the bichromatic setting, we introduce an algorithm that matches $\log n - o(\log n)$ points for instances consisting of $n$ red and $n$ blue points, and show that no deterministic algorithm can do better. We also consider the problem under the advice setting, where an online algorithm receives some bits of advice about the input sequence, and provide lower and upper bounds for the amount of advice that is required and sufficient to match all points.} } @inproceedings{DCC21, title={Compact Polyominoes (short paper)}, author={Shahin Kamali}, booktitle={Data Compression Conference (DCC)}, pages={346}, year={2021}, url = {https://ieeexplore.ieee.org/document/9418759}, organization={IEEE}, abstract = {We provide a compact representation of polyominoes with $n$ cells that supports navigation and visibility queries in constant time. Our oracle takes $3n + o(n)$ bits. Previous enumeration efforts indicate that at least $2.00091 n - o(n)$ bits (likely $2.021 n - o(n)$ bits) are required to distinguish polyominoes, hence confirming that our oracle is compact.} } @inproceedings{DCC20, title={Compact Representation of Graphs with Small Bandwidth and Treedepth}, author={Kamali, Shahin}, booktitle={Data Compression Conference (DCC)}, pages={233--242}, year={2020}, url = {https://ieeexplore.ieee.org/document/9105777}, organization={IEEE}, abstract = {We consider the problem of compact representation of graphs with small \emph{bandwidth} as well as graphs with small \emph{treedepth}. 
These parameters capture structural properties of graphs that come in useful in certain applications. We present simple navigation oracles that support degree and adjacency queries in constant time and neighborhood query in constant time per neighbor. For graphs of bandwidth $k$, our oracle takes $(k+ \lceil \log 2k \rceil) n + o(kn)$ bits. By way of an enumeration technique, we show that $(k - 5\sqrt{k} - 4)n - O(k^{2})$ bits are required to encode a graph of bandwidth $k$ and size $n$. For graphs of treedepth $k$, our oracle takes $(k + \lceil \log k \rceil + 2)n + o(kn)$ bits. We present a lower bound that certifies our oracle is succinct for certain values of $k \in o(n)$. } } @inproceedings{SPAA20, author = {Shahin Kamali and Helen Xu}, title = {Multicore Paging Algorithms Cannot Be Competitive (brief announcement)}, booktitle = {Proc. 32nd {ACM} Symposium on Parallelism in Algorithms and Architectures (SPAA)}, pages = {547--549}, publisher = {{ACM}}, year = {2020}, url = {https://dl.acm.org/doi/10.1145/3350755.3400270}, abstract = {Every processor with multiple cores sharing a cache needs to implement a page-replacement algorithm. Lopez-Ortiz and Salinger [ITCS 2012] demonstrated that competitive ratio of canonical paging algorithms such as Least-Recently-Used (LRU) and Furthest-In-Future (FIF) grows with the length of the input. In this paper, we answer an open question about the existence of competitive multicore paging algorithms in the negative. Specifically, we show that all lazy algorithms, which include all practical algorithms, cannot be competitive against the optimal offline algorithm.} } @inproceedings{ITCS20, author = {Spyros Angelopoulos and Christoph D{\"{u}}rr and Shendan Jin and Shahin Kamali and Marc P. Renault}, title = {Online Computation with Untrusted Advice}, booktitle = {Proc. 
11th Innovations in Theoretical Computer Science Conference (ITCS)}, pages = {52:1--52:15}, year = {2020}, arxiv = {https://arxiv.org/abs/1905.05655}, abstract = {The advice model of online computation captures a setting in which the algorithm is given some partial information concerning the request sequence. This paradigm allows to establish tradeoffs between the amount of this additional information and the performance of the online algorithm. However, if the advice is corrupt or, worse, if it comes from a malicious source, the algorithm may perform poorly. In this work, we study online computation in a setting in which the advice is provided by an untrusted source. Our objective is to quantify the impact of untrusted advice so as to design and analyze online algorithms that are robust and perform well even when the advice is generated in a malicious, adversarial manner. To this end, we focus on well-studied online problems such as ski rental, online bidding, bin packing, and list update. For ski-rental and online bidding, we show how to obtain algorithms that are Pareto-optimal with respect to the competitive ratios achieved; this improves upon the framework of Purohit et al. [NeurIPS 2018] in which Pareto-optimality is not necessarily guaranteed. For bin packing and list update, we give online algorithms with worst-case tradeoffs in their competitiveness, depending on whether the advice is trusted or not; this is motivated by work of Lykouris and Vassilvitskii [ICML 2018] on the paging problem, but in which the competitiveness depends on the reliability of the advice. Furthermore, we demonstrate how to prove lower bounds, within this model, on the tradeoff between the number of advice bits and the competitiveness of any online algorithm. Last, we study the effect of randomization: here we show that for ski-rental there is a randomized algorithm that Pareto-dominates any deterministic algorithm with advice of any size. 
We also show that a single random bit is not always inferior to a single advice bit, as it happens in the standard model.} } @inproceedings{SOFSEM20, author={Shahin Kamali and Avery Miller and Kenny Zhang}, title={Burning Two Worlds: Algorithms for Burning Dense and Tree-like Graphs}, booktitle={Proc. 41st International Conference on Current Trends in Theory and Practice of Informatics (SOFSEM)}, pages={113--124}, year={2020}, arxiv = {https://arxiv.org/abs/1909.00530}, organization={Springer, Berlin, Heidelberg}, abstract = {Graph burning is a model for the spread of social influence in networks. The objective is to measure how quickly a fire (e.g., a piece of fake news) can be spread in a network. The burning process takes place in discrete rounds. In each round, a new fire breaks out at a selected vertex and burns it. Meanwhile, the old fires extend to their adjacent vertices and burn them. A burning schedule selects where the new fire breaks out in each round, and the burning problem asks for a schedule that burns all vertices in a minimum number of rounds, termed the burning number of the graph. The burning problem is known to be NP-hard even when the graph is a tree or a disjoint set of paths. For connected graphs, it has been conjectured that burning takes at most $\lceil \sqrt{n} \rceil$ rounds. In this paper, we approach the algorithmic study of graph burning from two directions. First, we consider graphs with minimum degree $\delta$. We present an algorithm that burns any graph of size $n$ in at most $\sqrt{24n/(\delta+1)}$ rounds. In particular, for dense graphs with $\delta \in \Theta(n)$, all vertices are burned in a constant number of rounds. More interestingly, even when $\delta$ is a constant that is independent of the graph size, our algorithm answers the graph-burning conjecture in the affirmative by burning the graph in at most $\lceil \sqrt{n} \rceil$ rounds. 
In the second part of the paper, we consider burning graphs with bounded path-length or tree-length. These include many graph families including connected interval graphs (with path-length 1) and connected chordal graphs (with tree-length 1). We show that any graph with path-length $pl$ and diameter $d$ can be burned in $\lceil \sqrt{d-1} \rceil + pl$ rounds. Our algorithm ensures an approximation ratio of $1 + o(1)$ for graphs of bounded path-length. We introduce another algorithm that achieves an approximation ratio of $2 + o(1)$ for burning graphs of bounded tree-length. The approximation factor of our algorithms are improvements over the best existing approximation factor of 3 for burning general graphs.} } @inproceedings{BLOCKCHAIN19, author = {Saulo dos Santos and Chukwuocha Chibundo and Shahin Kamali and Ruppa K. Thulasiram}, title = {An Efficient Miner Strategy for Selecting Cryptocurrency Transactions}, booktitle = {Proc. IEEE Conference on Blockchain (Blockchain'19)}, url = {https://ieeexplore.ieee.org/abstract/document/8946174}, pages = {116--123}, year = {2019}, abstract = {Cryptocurrencies like Bitcoin use the blockchain technology to record transactions in a distributed and secure way. Each block contains a cryptographic hash of the previous block in addition to a set of transactions that it records. The first step for a miner to add a new block to the blockchain is to select a set of pending transactions from a mempool. The total size of selected transactions should not exceed the fixed capacity of blocks. If a miner completes the computationally-hard task of finding the cryptographic hash of the formed block, the block can be added to the blockchain in which case the transactions in that block will become complete. Transaction might have a fee that is granted to the miner upon being complete. As such, when forming a new block, miners tend to select transactions that offer the best fees. 
Finding a set of transactions with maximum total fee that fit into a block translates to the classic knapsack problem, which is an NP-hard problem. Meanwhile, miners are in fierce competition for mining blocks and hence cannot dedicate too much computational power for selecting the best set of transactions. Most existing solutions to tackle this problem are based on sorting the set of pending transactions by the ratio between their fee and their size. While the sorting step is not a bottleneck in normal situations, transaction can grow explosively in case of a market turbulence like that of 2017. Meanwhile, the total number of transactions increases over time. As such, it is desirable to have an efficient strategy that does not rely on sorting transactions before forming a block. In this paper, we review some of the existing strategies for miners to select transactions from the mempool. We also introduce a robust solution called Size-Density Table (SDT) for selecting transactions that does not use sorting. We perform a theoretical and experimental analysis of our solutions to compare it with other strategies. Our results indicate that our algorithm runs faster than previous approaches while the quality of its solutions (the total fees collected in its blocks) is also slightly better than the best existing strategies.} } @inproceedings{TAMC19, author = {Anthony Bonato and Shahin Kamali}, title = {Approximation Algorithms for Graph Burning}, booktitle = {Proc. 15th Annual Conference on Theory and Applications of Models of Computation (TAMC)}, pages = {74--92}, year = {2019}, arxiv = {https://arxiv.org/abs/1811.04449}, abstract = {Numerous approaches study the vulnerability of networks against social contagion. Graph burning studies how fast a contagion, modeled as a set of fires, spreads in a graph. The burning process takes place in synchronous, discrete rounds. 
In each round, a fire breaks out at a vertex, and the fire spreads to all vertices that are adjacent to a burning vertex. The selection of vertices where fires start defines a schedule that indicates the number of rounds required to burn all vertices. Given a graph, the objective of an algorithm is to find a schedule that minimizes the number of rounds to burn graph. Finding the optimal schedule is known to be NP-hard, and the problem remains NP-hard when the graph is a tree or a set of disjoint paths. The only known algorithm is an approximation algorithm for disjoint paths, which has an approximation ratio of 1.5. We present approximation algorithms for graph burning. For general graphs, we introduce an algorithm with an approximation ratio of 3. When the graph is a tree, we present another algorithm with approximation ratio 2. Moreover, we consider a setting where the graph is a forest of disjoint paths. In this setting, when the number of paths is constant, we provide an optimal algorithm which runs in polynomial time. When the number of paths is more than a constant, we provide two approximation schemes: first, under a regularity condition where paths have asymptotically equal lengths, we show the problem admits an approximation scheme which is fully polynomial. Second, for a general setting where the regularity condition does not necessarily hold, we provide another approximation scheme which runs in time polynomial in the size of the graph.} } @inproceedings{WADS19, author = {Joan Boyar and Lene M. Favrholdt and Shahin Kamali and Kim S. Larsen}, title = {Online Bin Covering with Advice}, booktitle={Proc. 
16th International Symposium on Algorithms and Data Structures (WADS)}, pages = {225--238}, arxiv = {https://arxiv.org/abs/1905.00066}, year = {2019}, abstract = {The bin covering problem asks for covering a maximum number of bins with an online sequence of $n$ items of different sizes in the range $(0,1]$; a bin is said to be covered if it receives items of total size at least 1. We study this problem in the advice setting and provide tight bounds for the size of advice required to achieve optimal solutions. Moreover, we show that any algorithm with advice of size $o(\log \log n)$ has a competitive ratio of at most 0.5. In other words, advice of size $o(\log \log n)$ is useless for improving the competitive ratio of 0.5, attainable by an online algorithm without advice. This result highlights a difference between the bin covering and the bin packing problems in the advice model: for the bin packing problem, there are several algorithms with advice of constant size that outperform online algorithms without advice. Furthermore, we show that advice of size $O(\log \log n)$ is sufficient to achieve a competitive ratio that is arbitrarily close to $0.53\bar{3}$ and hence strictly better than the best ratio $0.5$ attainable by purely online algorithms. The technicalities involved in introducing and analyzing this algorithm are quite different from the existing results for the bin packing problem and confirm the different nature of these two problems. Finally, we show that a linear number of bits of advice is necessary to achieve any competitive ratio better than 15/16 for the online bin covering problem. 
} } @inproceedings{SPIRE19, author = { Arezoo Abdollahi and Neil Bruce and Shahin Kamali and Rezaul Karim}, title = {Lossless Image Compression Using List Update Algorithms}, url = {https://www.springerprofessional.de/en/lossless-image-compression-using-list-update-algorithms/17242680}, github = {https://github.com/ArezooAbdollahi/Lossless-image-compression-with-Hilbert-Curve-and-Move-to-Front}, booktitle={Proc. 26th International Symposium on String Processing and Information Retrieval (SPIRE)}, pages = {16--34}, year = {2019}, abstract = {We consider lossless image compression using a technique similar to bZip2 for sequential data. Given an image represented with a matrix of pixel values, we consider different approaches for linearising the image into a sequence and then encoding the sequence using the Move-To-Front list update algorithm. In both linearisation and encoding stages, we exploit the locality present in the images to achieve encodings that are as compressed as possible. We consider a few approaches, and in particular Hilbert space-filling curves, for linearising the image. Using a natural model of locality for images introduced by Albers et al. [J. Comput. Syst. Sci. 2015], we establish the advantage of Hilbert space-filling curves over other linearisation techniques such as row-major or column-major curves for preserving the locality during the linearisation. We also use a result by Angelopoulos and Schweitzer [J. ACM 2013] to select Move-To-Front as the best list update algorithm for encoding the linearised sequence. In summary, our theoretical results show that a combination of Hilbert space-filling curves and Move-To-Front encoding has advantage over other approaches. We verify this with experiments on a dataset consisting of different categories of images. 
} } @proceedings{CCCG2018, editor = {Stephane Durocher and Shahin Kamali}, title = {Proceedings of the 30th Canadian Conference on Computational Geometry, {CCCG} 2018}, year = {2018}, abstract = {This volume contains the proceedings of the 30th Canadian Conference on Computational Geometry (CCCG 2018), which took place on August 8--10, 2018, at the University of Manitoba, in Winnipeg, Manitoba, Canada. These proceedings will be made available electronically after the conclusion of the conference on the CCCG website: http://www.cccg.ca/. We are grateful to the CCCG 2018 Program Committee and external reviewers, for their time and effort carefully reviewing all submissions. Each submission was reviewed by a minimum of three program committee members. The program committee accepted 46 papers out of 65 papers submitted. We thank the authors of all submitted papers and all conference attendees. We thank the invited speakers: Dr. Matthew (Matya) Katz (Paul Erd{\H{o}}s Memorial Lecture), Dr. Marc van Kreveld (Ferran Hurtado Memorial Lecture), and Dr. Carola Wenk. In addition, we are grateful for the tremendous efforts of the CCCG 2018 Local Organizing Committee for their assistance; in particular, we would like to acknowledge Avery Miller and Victoria Harris. We acknowledge the generous financial support from our sponsors: the Pacific Institute for the Mathematical Sciences (PIMS), Elsevier, the Fields Institute for Research in Mathematical Sciences, and the University of Manitoba. } } @article{Algorithmica2018, author = {Shahin Kamali}, title = {Compact Representation of Graphs of Small Clique-Width}, journal = {Algorithmica}, url = {https://dl.acm.org/doi/10.1007/s00453-017-0365-6}, volume = {80}, number = {7}, pages = {2106--2131}, year = {2018}, abstract = {The notion of clique-width for graphs offers many research topics and has received considerable attention in the past decade. 
A graph has clique-width $k$ if it can be represented as an algebraic expression on $k$ labels associated with its vertices. Many computationally hard problems can be solved in polynomial time for graphs of bounded clique-width. Interestingly also, many graph families that arise in practice have bounded clique-width. In this paper, we consider the problem of preprocessing a graph of size $n$ and clique-width $k$ to build space-efficient data structures that support basic graph navigation queries. First, by way of a counting argument, which is of interest in its own right, we prove the space requirement of any representation is $\Omega(kn)$. Then we present a navigation oracle which answers adjacency query in constant time and neighborhood queries in constant time per neighbor. This oracle uses $O(kn)$ space (i.e., $O(kn)$ bits). We also present a degree query which reports the degree of each given vertex in $O(k\log^*(n))$ time using $O(kn \log^*(n))$ bits. } } @article{ToCS18, author = {Spyros Angelopoulos and Christoph D{\"{u}}rr and Shahin Kamali and Marc P. Renault and Adi Ros{\'{e}}n}, title = {Online Bin Packing with Advice of Small Size}, journal = {Theory Comput. Syst.}, volume = {62}, number = {8}, pages = {2006--2034}, year = {2018}, url = {https://www.irif.fr/~adiro/publ/ADKRR.pdf}, abstract = { In this paper, we study the advice complexity of the online bin packing problem. In this well-studied setting, the online algorithm is supplemented with some additional information concerning the input. We improve upon both known upper and lower bounds of online algorithms for this problem. On the positive side, we first provide a relatively simple algorithm that achieves a competitive ratio arbitrarily close to 1.5, using constant-size advice. 
Our result implies that 16 bits of advice suffice to obtain a competitive ratio better than any online algorithm without advice, thus improving the previously known bound of O(log(n)) bits required to attain this performance. In addition, we introduce a more complex algorithm that still requires only constant-size advice, and has a competitive ratio arbitrarily close to 1.47012. This is the currently best performance of any online bin packing algorithm with sublinear advice. On the negative side, we extend a construction due to Boyar et al. (Algorithmica 74(1), 507-527 2016) so as to show that no online algorithm with sub-linear advice can be 7/6-competitive, improving on the lower bound of 9/8 from Boyar et al.} } @article{IEEESelected17, title={Distributed Service Function Chaining}, author={Milad Ghaznavi and Nashid Shahriar and Shahin Kamali and Reaz Ahmed and Raouf Boutaba}, journal={IEEE Journal on Selected Areas in Communications}, volume = {35}, number = {11}, pages = {2479--2489}, year = {2017}, url = {https://ieeexplore-ieee-org.uml.idm.oclc.org/document/8058439}, abstract = {A service-function chain, or simply a chain, is an ordered sequence of service functions, e.g., firewalls and load balancers, composing a service. A chain deployment involves selecting and instantiating a number of virtual network functions (VNFs), i.e., softwarized service functions, placing VNF instances, and routing traffic through them. In the current optimization-models of a chain deployment, the instances of the same function are assumed to be identical, while typical service providers offer VNFs with heterogeneous throughput and resource configurations. The VNF instances of the same function are installed in a single physical machine, which limits a chain to the throughput of a few instances that can be installed in one physical machine. Furthermore, the selection, placement, and routing problems are solved in isolation. 
We present distributed service function chaining that coordinates these operations, places VNF-instances of the same function distributedly, and selects appropriate instances from typical VNF offerings. Such a deployment uses network resources more efficiently and decouples a chain's throughput from that of physical machines. We formulate this deployment as a mixed integer programming (MIP) model, prove its NP-Hardness, and develop a local search heuristic called Kariz. Extensive experiments demonstrate that Kariz achieves a competitive acceptance-ratio of 76%-100% with an extra cost of less than 24% compared with the MIP model.} } @article{DAM17, title={Efficient Broadcast Trees for Weighted Vertices}, author={Hovhannes A Harutyunyan and Shahin Kamali}, journal={Discrete Applied Mathematics}, volume={216}, pages={598--608}, year={2017}, publisher={North-Holland}, abstract = {In this paper, we consider a weighted-vertex model for information dissemination in communication networks. Each node of the network (e.g., a processing machine) is assigned a weight, which represents the delay of the node after receiving data and before sending it to its neighbors. We are interested in the broadcasting problem in which the goal is to disseminate information from one node to all other nodes as quickly as possible. We introduce a simple algorithm for optimal broadcasting from a given node in weighted-vertex trees. We also study the problem of constructing efficient broadcast trees that minimize the required time for broadcasting. We investigate two variants of this problem. First, we show that, given a set of vertices with specified weights, one can construct a tree that connects all vertices and enables broadcasting from any vertex in the optimal time of $\Theta(\log n)$. 
Second, given a set of weighted vertices among which one vertex is specified as the originator, we introduce a polynomial algorithm that connects vertices with a tree in which broadcasting from the originator completes in minimum time.} } @article{InfoComp17ListUpdate, title={On the List Update Problem with Advice}, author={Joan Boyar and Shahin Kamali and Kim S. Larsen and Alejandro Lopez-Ortiz}, journal={Information and Computation}, volume={253}, pages={411--423}, year={2017}, arxiv = {https://arxiv.org/abs/1311.7357}, publisher={Academic Press}, abstract = {We study the online list update problem under the advice model of computation. Under this model, an online algorithm receives partial information about the unknown parts of the input in the form of some bits of advice generated by a benevolent offline oracle. We show that advice of linear size is required and sufficient for a deterministic algorithm to achieve an optimal solution or even a competitive ratio better than 15/14. On the other hand, we show that surprisingly two bits of advice are sufficient to break the lower bound of 2 on the competitive ratio of deterministic online algorithms and achieve a deterministic algorithm with a competitive ratio of 5/3. In this upper-bound argument, the bits of advice determine the algorithm with smaller cost among three classical online algorithms, TIMESTAMP and two members of the MTF2 family of algorithms. We also show that MTF2 algorithms are 2.5-competitive.} } @inproceedings{ICDCS17, title={Robust Multi-tenant Server Consolidation in the Cloud for Data Analytics Workloads}, author={Joseph Mate and Khuzaima Daudjee and Shahin Kamali}, booktitle={Proc. 37th International Conf. International Conference on Distributed Computing Systems ({ICDCS})}, pages={2111--2118}, year={2017}, organization={IEEE}, url = {https://ieeexplore-ieee-org.uml.idm.oclc.org/document/7980157}, abstract = {Server consolidation is the hosting of multiple tenants on a server machine. 
Given a sequence of data analytics tenant loads defined by the amount of resources that the tenants require and a service-level agreement (SLA) between the customer and the cloud service provider, significant cost savings can be achieved by consolidating multiple tenants. Since server machines can fail causing their tenants to become unavailable, service providers can place replicas of each tenant on multiple servers and reserve capacity to ensure that tenant failover will not result in overload on any remaining server. We present the CUBEFIT algorithm for server consolidation that reduces costs by utilizing fewer servers than existing approaches for data analytics workloads. Unlike existing consolidation algorithms, CUBEFIT can tolerate multiple server failures while ensuring that no server becomes overloaded. Through theoretical analysis and experimental evaluation, we show that CUBEFIT is superior to existing algorithms and produces near-optimal tenant allocation when the number of tenants is large. Through evaluation and deployment on a cluster of 73 machines as well as through simulation studies, we experimentally demonstrate the efficacy of CUBEFIT.} } @inproceedings{DCC16, title={Compact Navigation Oracles for Graphs with Bounded Clique-Width}, author={Shahin Kamali}, booktitle={Data Compression Conference (DCC)}, url = {https://ieeexplore-ieee-org.uml.idm.oclc.org/document/7786201}, pages={566--576}, year={2016}, organization={IEEE}, abstract = {The notion of clique-width for graphs is a relatively new topic which has received attention in the past decade. A graph has bounded clique-width if it can be represented as an algebraic expression on a constant number of labels associated with its vertices. Many computationally hard problems can be solved in polynomial time for graphs with bounded clique-width. Interestingly also, many graph families that arise in practice have bounded clique-width. 
In this paper, we present compact navigation oracles for graphs with bounded clique-width. Our oracles answer adjacency and neighborhood queries in constant time using O(n) space (i.e., O(n) bits) for a graph of size n, and report the degree of each given vertex, also in constant time, using O(n lg lg n) bits. } } @article{Algorithmica16BP, title={Online Bin Packing with Advice}, author={Joan Boyar and Shahin Kamali and Kim S Larsen and Alejandro Lopez-Ortiz}, journal={Algorithmica}, volume={74}, number={1}, pages={507--527}, year={2016}, publisher={Springer US}, arxiv = {https://arxiv.org/abs/1212.4016}, abstract = {We consider the online bin packing problem under the advice complexity model where the 'online constraint' is relaxed and an algorithm receives partial information about the future requests. We provide tight upper and lower bounds for the amount of advice an algorithm needs to achieve an optimal packing. We also introduce an algorithm that, when provided with log n + o(log n) bits of advice, achieves a competitive ratio of 3/2 for the general problem. This algorithm is simple and is expected to find real-world applications. We introduce another algorithm that receives 2n + o(n) bits of advice and achieves a competitive ratio of 4/3 + epsilon. Finally, we provide a lower bound argument that implies that advice of linear size is required for an algorithm to achieve a competitive ratio better than 9/8.} } @article{TOCS16, title={On the Advice Complexity of the k-server Problem Under Sparse Metrics}, author={Sushmita Gupta and Shahin Kamali and Alejandro Lopez-Ortiz}, journal={Theory of Computing Systems}, volume={59}, number={3}, pages={476--499}, year={2016}, publisher={Springer US}, arxiv = {https://arxiv.org/abs/1305.2108}, abstract = {We consider the k-server problem under the advice model of computation when the underlying metric space is sparse. 
On one side, we introduce $\Theta(1)$-competitive algorithms for a wide range of sparse graphs, which require advice of (almost) linear size. Namely, we show that for graphs of size $N$ and treewidth $\alpha$, there is an online algorithm which receives $O(n (\log \alpha + \log \log N))$ bits of advice and optimally serves a sequence of length $n$. We also show that if a graph admits a system of $\mu$ collective tree $(q,r)$-spanners, then there is a $(q+r)$-competitive algorithm which receives $O(n (\log \mu + \log \log N))$ bits of advice. Among other results, this gives a $3$-competitive algorithm for planar graphs, provided with $O(n \log \log N)$ bits of advice. On the other side, we show that advice of size $\Omega(n)$ is required to obtain a $1$-competitive algorithm for sequences of size $n$ even for the 2-server problem on a path metric of size $N \geq 5$. Through another lower bound argument, we show that at least $\frac{n}{2}(\log \alpha - 1.22)$ bits of advice is required to obtain an optimal solution for metric spaces of treewidth $\alpha$, where $4 \leq \alpha < 2k$. } } @incollection{Encyclopedia16, author = {Shahin Kamali}, title = {Online List Update}, booktitle = {Encyclopedia of Algorithms}, pages = {1448--1451}, year = {2016}, url = {https://link.springer.com/referenceworkentry/10.1007%2F978-1-4939-2864-4_266}, abstract = {This article reviews the list update problem, classic results on deterministic and randomized algorithms, and the impact of the locality of reference on the performance of online list update algorithms. The article concludes with a quick overview of the applications of the list update problem and open questions.} } @inproceedings{EDBT15, author = {Daniel Nicoara and Shahin Kamali and Khuzaima Daudjee and Lei Chen}, title = {Hermes: Dynamic Partitioning for Distributed Social Network Graph Databases}, booktitle = {Proc. 18th International Conf. 
on Extending Database Technology (EDBT)}, pages = {25--36}, year = {2015}, url = {http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.682.5523}, abstract = {Social networks are large graphs that require multiple graph database servers to store and manage them. Each database server hosts a graph partition with the objectives of balancing server loads, reducing remote traversals (edge-cuts), and adapting the partitioning to changes in the structure of the graph in the face of changing workloads. To achieve these objectives, a dynamic repartitioning algorithm is required to modify an existing partitioning to maintain good quality partitions while not imposing a significant overhead to the system. In this paper, we introduce a lightweight repartitioner, which dynamically modifies a partitioning using a small amount of resources. In contrast to the existing repartitioning algorithms, our lightweight repartitioner is efficient, making it suitable for use in a real system. We integrated our lightweight repartitioner into Hermes, which we designed as an extension of the open source Neo4j graph database system, to support workloads over partitioned graph data distributed over multiple servers. Using real-world social network data, we show that Hermes leverages the lightweight repartitioner to maintain high quality partitions and provides a 2 to 3 times performance improvement over the de-facto standard random hash-based partitioning.} } @inproceedings{Algocloud15, author = {Shahin Kamali}, title = {Efficient Bin Packing Algorithms for Resource Provisioning in the Cloud}, booktitle = {Proc. International Conference on Algorithmic Aspects of Cloud Computing ({ALGOCLOUD})}, pages = {84--98}, year = {2015}, url = {http://supertech.csail.mit.edu/papers/ShahinAlgoCloud15.pdf}, abstract = {We consider the Infrastructure as a Service (IaaS) model for cloud service providers. 
This model can be abstracted as a form of online bin packing problem where bins represent physical machines and items represent virtual machines with dynamic load. The input to the problem is a sequence of operations each involving an insertion, deletion or updating the size of an item. The goal is to use live migration to achieve packings with a small number of active bins. Reducing the number of bins is critical for green computing and saving on energy costs. We introduce an algorithm, named HarmonicMix, that supports all operations and moves at most ten items per operation. The algorithm achieves a competitive ratio of 4/3, implying that the number of active bins at any stage of the algorithm is at most 4/3 times more than any offline algorithm that uses infinite migration. This is an improvement over a recent result of Song et al. (IEEE Trans. Computers, 2014) who introduced an algorithm, named VISBP, with a competitive ratio of 3/2. Our experiments indicate a considerable advantage for HarmonicMix over VISBP with respect to average-case performance. HarmonicMix is simple and runs as fast as classic bin packing algorithms such as Best Fit and First Fit; this makes the algorithm suitable for practical purposes.} } @inproceedings{SOFSEM15, title={Efficient Online Strategies for Renting Servers in the Cloud}, author={Shahin Kamali and Alejandro Lopez-Ortiz}, booktitle={Proc. 41st International Conference on Current Trends in Theory and Practice of Informatics (SOFSEM)}, pages={277--288}, year={2015}, organization={Springer, Berlin, Heidelberg}, arxiv = {https://arxiv.org/abs/1408.4156}, abstract = {In Cloud systems, we often deal with jobs that arrive and depart in an online manner. Upon its arrival, a job should be assigned to a server. Each job has a size which defines the amount of resources that it needs. Servers have uniform capacity and, at all times, the total size of jobs assigned to a server should not exceed the capacity. 
This setting is closely related to the classic bin packing problem. The difference is that, in bin packing, the objective is to minimize the total number of used servers. In the Cloud, however, the charge for each server is proportional to the length of the time interval it is rented for, and the goal is to minimize the cost involved in renting all used servers. Recently, certain bin packing strategies were considered for renting servers in the Cloud [Li et al. SPAA'14]. There, it is proved that any Any-Fit bin packing strategy has a competitive ratio of at least $\mu$, where $\mu$ is the max/min interval length ratio of jobs. It is also shown that First Fit has a competitive ratio of $2\mu + 13$ while Best Fit is not competitive at all. We observe that the lower bound of $\mu$ extends to all online algorithms. We also prove that, surprisingly, Next Fit algorithm has competitive ratio of at most $2\mu + 1$. We also show that a variant of Next Fit achieves a competitive ratio of $K \max\{1,\mu/(K-1)\} + 1$, where $K$ is a parameter of the algorithm. In particular, if the value of $\mu$ is known, the algorithm has a competitive ratio of $\mu + 2$; this improves upon the existing upper bound of $\mu + 8$. Finally, we introduce a simple algorithm called Move To Front (MTF) which has a competitive ratio of at most $6\mu + 7$ and also promising average-case performance. 
We experimentally study the average-case performance of different algorithms and observe that the typical behaviour of MTF is distinctively better than other algorithms.} } @article{TOCS15MST, title={On Minimum- and Maximum-weight Minimum Spanning Trees with Neighborhoods}, author={Reza Dorrigiv and Robert Fraser and Meng He and Shahin Kamali and Akitoshi Kawamura and Alejandro Lopez-Ortiz and Diego Seco}, journal={Theory of Computing Systems}, volume={56}, number={1}, pages={220--250}, year={2015}, url = {https://web.cs.dal.ca/~mhe/publications/wowa12_spanningtree.pdf}, publisher={Springer US}, abstract = {We study optimization problems for the Euclidean minimum spanning tree (MST) on imprecise data. To model imprecision, we accept a set of disjoint disks in the plane as input. From each member of the set, one point must be selected, and the MST is computed over the set of selected points. We consider both minimizing and maximizing the weight of the MST over the input. The minimum weight version of the problem is known as the minimum spanning tree with neighborhoods (MSTN) problem, and the maximum weight version (max-MSTN) has not been studied previously to our knowledge. We provide deterministic and parameterized approximation algorithms for the max-MSTN problem, and a parameterized algorithm for the MSTN problem. Additionally, we present hardness of approximation proofs for both settings.} } @inproceedings{WADS15, title={Online Bin Packing with Advice of Small Size}, author={Spyros Angelopoulos and Christoph D{\"u}rr and Shahin Kamali and Marc Renault and Adi Ros{\'e}n}, booktitle={Proc. 14th International Symposium on Algorithms and Data Structures (WADS)}, pages={40--53}, year={2015}, organization={Springer, Cham}, url = {https://www.irif.fr/~adiro/publ/ADKRR.pdf}, abstract = { In this paper, we study the advice complexity of the online bin packing problem. 
In this well-studied setting, the online algorithm is supplemented with some additional information concerning the input. We improve upon both known upper and lower bounds of online algorithms for this problem. On the positive side, we first provide a relatively simple algorithm that achieves a competitive ratio arbitrarily close to 1.5, using constant-size advice. Our result implies that 16 bits of advice suffice to obtain a competitive ratio better than any online algorithm without advice, thus improving the previously known bound of O(log(n)) bits required to attain this performance. In addition, we introduce a more complex algorithm that still requires only constant-size advice, and has a competitive ratio arbitrarily close to 1.47012. This is the currently best performance of any online bin packing algorithm with sublinear advice. On the negative side, we extend a construction due to Boyar et al. (Algorithmica 74(1), 507-527 2016) so as to show that no online algorithm with sub-linear advice can be 7/6-competitive, improving on the lower bound of 9/8 from Boyar et al.} } @inproceedings{CIKM15, title={{HDRF}: Stream-based Partitioning for Power-law Graphs}, author={Fabio Petroni and Leonardo Querzoni and Khuzaima Daudjee and Shahin Kamali and Giorgio Iacoboni}, booktitle={Proc. 24th {ACM} International Conference on Information and Knowledge Management (CIKM)}, pages={243--252}, year={2015}, organization={ACM}, url = {https://www.fabiopetroni.com/Download/petroni2015HDRF.pdf}, abstract = {Balanced graph partitioning is a fundamental problem that is receiving growing attention with the emergence of distributed graph-computing (DGC) frameworks. In these frameworks, the partitioning strategy plays an important role since it drives the communication cost and the workload balance among computing nodes, thereby affecting system performance. 
However, existing solutions only partially exploit a key characteristic of natural graphs commonly found in the real-world: their highly skewed power-law degree distributions. In this paper, we propose High-Degree (are) Replicated First (HDRF), a novel streaming vertex-cut graph partitioning algorithm that effectively exploits skewed degree distributions by explicitly taking into account vertex degree in the placement decision. We analytically and experimentally evaluate HDRF on both synthetic and real-world graphs and show that it outperforms all existing algorithms in partitioning quality.} } @inproceedings{CCCG15, title={Online Packing of Equilateral Triangles}, author={Shahin Kamali and Alejandro Lopez-Ortiz and Zahed Rahmati}, booktitle={Proc. 27th Canadian Conf. on Computational Geometry (CCCG)}, year={2015}, pages = {6}, url = {https://research.cs.queensu.ca/cccg2015/CCCG15-papers/36.pdf}, abstract = {We investigate the online triangle packing problem in which a sequence of equilateral triangles with different sizes appear in an online, sequential manner. The goal is to place these triangles into a minimum number of squares of unit size. We provide upper and lower bounds for the competitive ratio of online algorithms. In particular, we introduce an algorithm which achieves a competitive ratio of at most 2.474. On the other hand, we show that no online algorithm can have a competitive ratio better than 1.509.} } @inproceedings{ISAAC15, title={All-Around Near-Optimal Solutions for the Online Bin Packing Problem}, author={Shahin Kamali and Alejandro Lopez-Ortiz}, booktitle={Proc. 26th International Symposium on Algorithms and Computation (ISAAC)}, pages={727--739}, year={2015}, organization={Springer, Berlin, Heidelberg}, url = {http://supertech.csail.mit.edu/papers/ShahinISAAC15.pdf}, abstract = {In this paper we present algorithms with optimal average-case and close-to-best known worst-case performance for the classic online bin packing problem. 
It has long been observed that known bin packing algorithms with optimal average-case performance are not optimal in the worst-case. In particular First Fit and Best Fit have optimal asymptotic average-case ratio of 1 but a worstcase competitive ratio of 1.7. The competitive ratio can be improved to 1.691 using the Harmonic algorithm. Further variations of this algorithm can push down the competitive ratio to 1.588. However, these algorithms have poor performance on average; in particular, Harmonic algorithm has average-case ratio of 1.27. In this paper, first we introduce a simple algorithm which we term Harmonic Match. This algorithm performs as well as Best Fit on average, i.e., it has an average-case ratio of 1. Moreover, the competitive ratio of the algorithm is as good as Harmonic, i.e., it converges to 1.691 which is an improvement over Best Fit and First Fit. We also introduce a different algorithm, termed as Refined Harmonic Match, which achieves an improved competitive ratio of 1.636 while maintaining the good average-case performance of Harmonic Match and Best Fit. Our experimental evaluations show that our proposed algorithms have comparable average-case performance with Best Fit and First Fit, and this holds also for sequences that follow distributions other than the uniform distribution.} } @inproceedings{LATA14, author = {Joan Boyar and Shahin Kamali and Kim S. Larsen and Alejandro Lopez{-}Ortiz}, title = {On the List Update Problem with Advice}, booktitle = {Proc. 8th International Conference on Language, Automata Theory and Applications {(LATA)}}, pages = {210--221}, year = {2014}, arxiv = {https://arxiv.org/abs/1311.7357}, abstract = {We study the online list update problem under the advice model of computation. Under this model, an online algorithm receives partial information about the unknown parts of the input in the form of some bits of advice generated by a benevolent offline oracle. 
We show that advice of linear size is required and sufficient for a deterministic algorithm to achieve an optimal solution or even a competitive ratio better than 15/14. On the other hand, we show that surprisingly two bits of advice are sufficient to break the lower bound of 2 on the competitive ratio of deterministic online algorithms and achieve a deterministic algorithm with a competitive ratio of 5/3. In this upper-bound argument, the bits of advice determine the algorithm with smaller cost among three classical online algorithms, TIMESTAMP and two members of the MTF2 family of algorithms. We also show that MTF2 algorithms are 2.5-competitive.} } @inproceedings{STACS14, author = {Joan Boyar and Shahin Kamali and Kim S. Larsen and Alejandro Lopez{-}Ortiz}, title = {Online Bin Packing with Advice}, booktitle = {Proc. 31st International Symposium on Theoretical Aspects of Computer Science {(STACS)}}, pages = {174--186}, year = {2014}, arxiv = {https://arxiv.org/abs/1212.4016}, abstract = {We consider the online bin packing problem under the advice complexity model where the 'online constraint' is relaxed and an algorithm receives partial information about the future requests. We provide tight upper and lower bounds for the amount of advice an algorithm needs to achieve an optimal packing. We also introduce an algorithm that, when provided with log n + o(log n) bits of advice, achieves a competitive ratio of 3/2 for the general problem. This algorithm is simple and is expected to find real-world applications. We introduce another algorithm that receives 2n + o(n) bits of advice and achieves a competitive ratio of 4/3 + epsilon. 
Finally, we provide a lower bound argument that implies that advice of linear size is required for an algorithm to achieve a competitive ratio better than 9/8.} } @inproceedings{DCC2014, title={Better compression through better list update algorithms}, author={Shahin Kamali and Alejandro Lopez-Ortiz}, booktitle={Data Compression Conference (DCC)}, pages={372--381}, year={2014}, organization={IEEE}, url = {https://ieeexplore.ieee.org/document/6824445}, abstract = {List update is a key step during the Burrows-Wheeler transform (BWT) compression. Previous work has shown that careful study of the list update step leads to better BWT compression. Surprisingly, the theoretical study of list update algorithms for compression has lagged behind its use in real practice. To be more precise, the standard model by Sleator and Tarjan for list update considers a 'linear cost-of-access' model while compression incurs a logarithmic cost of access, i.e. accessing item i in the list has cost Theta(i) in the standard model but Theta(log i) in compression applications. These models have been shown, in general, not to be equivalent. This paper has two contributions: (1) We give the first theoretical proof that the commonly used Move-To-Front (MTF) has good performance under the compression logarithmic cost-of-access model. This has long been known in practice but a formal proof under the logarithmic cost compression model was missing until now, (2) we further refine the online compression model to reflect its use under compression by applying the recently developed 'online algorithms with advice' model. This advice model was initially a purely theoretical construct in which the online algorithm has access to an all powerful oracle during the computation. We show that surprisingly, this seemingly unrealistic model can be used to produce better multi-pass compression algorithms. 
More precisely, we introduce an 'almost-online' list update algorithm, which we term BIB which results in a compression scheme which is superior to schemes using standard online algorithms, in particular those of MTF and TIMESTAMP. For example, for the files in the standard Canterbury Corpus, the compression ratio of the scheme that uses BIB is 33.66 on average, while the compression ratios for the schemes that use MTF and TIMESTAMP are respectively 34.25 and 36.30.} } @article{Algorithmica14ICALP, title={Compact navigation and distance oracles for graphs with small treewidth}, author={Arash Farzan and Shahin Kamali}, journal={Algorithmica}, volume={69}, number={1}, pages={92--116}, year={2014}, url = {https://cs.uwaterloo.ca/research/tr/2011/CS-2011-15.pdf}, publisher={Springer US}, abstract = {Given an unlabeled, unweighted, and undirected graph with $n$ vertices and small (but not necessarily constant) treewidth $k$, we consider the problem of preprocessing the graph to build space-efficient encodings (oracles) to perform various queries efficiently. We assume the word RAM model where the size of a word is $\omegah{\log n}$ bits. The first oracle, we present, is the navigation oracle which facilitates primitive navigation operations of adjacency, neighborhood, and degree queries. By way of an enumeration argument, which is of interest in its own right, we show the space requirement of the oracle is optimal to within lower order terms for all graphs with $n$ vertices and treewidth $k$. The oracle supports the mentioned queries all in constant worst-case time. The second oracle, we present, is an exact distance oracle which facilitates distance queries between any pair of vertices (i.e., an all-pairs shortest-path oracle). The space requirement of the oracle is also optimal to within lower order terms. Moreover, the distance queries perform in $\oh{k^3\log^3 k}$ time. 
Particularly, for the class of graphs of popular interest, graphs of bounded treewidth (where $k$ is constant), the distances are reported in constant worst-case time. } } @phdthesis{Kamali14Thesis, type = { Doctoral Thesis}, year = {2014}, school = { University of Waterloo }, organization = {David Cheriton School of Computer Science }, title = {Alternative Approaches for Analysis of Bin Packing and List Update Problems}, school = {University of Waterloo, Ontario, Canada}, pages = {137}, url = {https://uwspace.uwaterloo.ca/handle/10012/8843}, author = { Shahin Kamali }, abstract = {In this thesis we introduce and evaluate new algorithms and models for the analysis of online bin packing and list update problems. These are two classic online problems which are extensively studied in the literature and have many applications in the real world. The main goal of this thesis is to develop new approaches for studying online problems, and in particular bin packing and list update, to guide development of practical algorithms performing quite well on real-world inputs. In doing so, we introduce new algorithms with good performance (not only under the competitive analysis) as well as new models which are more realistic for certain applications of the studied problems. In doing so, we introduce online bin packing algorithms which outperform Best Fit and First Fit in terms of competitive ratio while maintaining their good average-case performance. An alternative for analysis of online problems is the advice model which has received significant attention in the past few years. Under the advice model, an online algorithm receives a number of bits of advice about the unrevealed parts of the sequence. Generally, there is a trade-off between the size of the advice and the performance of online algorithms. 
The advice model generalizes the existing frameworks in which an online algorithm has partial knowledge about the input sequence, e.g., the access graph model for the paging problem. We study list update and bin packing problems under the advice model and answer several relevant questions about the advice complexity of these problems. Online problems are usually studied under specific settings which are not necessarily valid for all applications of the problem. As an example, online bin packing algorithms are widely used for server consolidation to minimize the number of active servers in a data center. In some applications, e.g., tenant placement in the Cloud, often a `fault-tolerant' solution for server consolidation is required. In this setting, the problem becomes different and the classic algorithms can no longer be used. We study a fault-tolerant model for the bin packing problem and analyze algorithms which fit this particular application of the problem. Similarly, the list update problem was initially proposed for maintaining self-adjusting linked lists. However, presently, the main application of the problem is in the data compression realm. We show that the standard cost model is not suitable for compression purposes and study a compression cost model for the list update problem. Our analysis justifies the advantage of the compression schemes which are based on Move-To-Front algorithm and might lead to improved compression algorithms.} } @inproceedings{SPAA14, title={On the Online Fault-tolerant Server Consolidation Problem}, author={Khuzaima Daudjee and Shahin Kamali and Alejandro Lopez-Ortiz}, booktitle={Proc. of the 26th ACM symposium on Parallelism in algorithms and architectures (SPAA)}, pages={12--21}, year={2014}, organization={ACM}, url = {https://dl.acm.org/doi/10.1145/2612669.2612686}, abstract = {In the server consolidation problem, the goal is to minimize the number of servers needed to host a set of clients. 
The clients appear in an online manner and each of them has a certain load. The servers have uniform capacity and the total load of clients assigned to a server must not exceed this capacity. Additionally, to have a fault-tolerant solution, the load of each client should be distributed between at least two different servers so that failure of one server avoids service interruption by migrating the load to the other servers hosting the respective second loads. In a simple setting, upon receiving a client, an online algorithm needs to select two servers and assign half of the load of the client to each server. We analyze the problem in the framework of competitive analysis. First, we provide upper and lower bounds for the competitive ratio of two well known heuristics which are introduced in the context of tenant placement in the cloud. In particular, we show their competitive ratios are no better than 2. We then present a new algorithm called Horizontal Harmonic and show that it has an improved competitive ratio which converges to 1.59. The simplicity of this algorithm makes it a good choice for use by cloud service providers. Finally, we prove a general lower bound that shows any online algorithm for the online fault-tolerant server consolidation problem has a competitive ratio of at least 1.42.} } @inproceedings{CCCG14, title={Almost Online Square Packing}, author={Shahin Kamali and Alejandro Lopez-Ortiz}, booktitle={Proc. 26th Canadian Conf. on Computational Geometry (CCCG)}, year={2014}, pages = {1-7}, url = {http://www.cccg.ca/proceedings/2014/papers/paper24.pdf}, abstract = {In the square packing problem, the goal is to pack squares of different sizes into the smallest number of bins (squares) of uniform size. We introduce an almostonline square packing algorithm which places squares in an online, sequential manner. In doing so, it receives advice of logarithmic size from an offline oracle which runs in linear time. 
Our algorithm achieves a competitive ratio of at most 1.84
Open problems and directions for future work are included.} } @inproceedings{DCC13Context, title={Context-based algorithms for the list-update problem under alternative cost models}, author={Shahin Kamali and Susana Ladra and Alejandro Lopez-Ortiz and Diego Seco}, booktitle={Data Compression Conference (DCC), 2013}, pages={361--370}, year={2013}, organization={IEEE}, url = {https://ieeexplore-ieee-org.uml.idm.oclc.org/document/6543072}, abstract = {The List-Update Problem is a well studied online problem with direct applications in data compression. Although the model proposed by Sleator & Tarjan has become the standard in the field for the problem, its applicability in some domains, and in particular for compression purposes, has been questioned. In this paper, we focus on two alternative models for the problem that arguably have more practical significance than the standard model. We provide new algorithms for these models, and show that these algorithms outperform all classical algorithms under the discussed models. This is done via an empirical study of the performance of these algorithms on the reference data set for the list-update problem. The presented algorithms make use of the context-based strategies for compression, which have not been considered before in the context of the list-update problem and lead to improved compression algorithms. In addition, we study the adaptability of these algorithms to different measures of locality of reference and compressibility.} } @inproceedings{WALCOM13, title={Broadcasting in Conflict-Aware Multi-channel Networks}, author={Francisco Claude and Reza Dorrigiv and Shahin Kamali and Alejandro Lopez-Ortiz and Pawel Pralat and Jazmin Romero and Alejandro Salinger and Diego Seco}, booktitle={Proc. 
7th International Conference and Workshop on Algorithms and Computation (WALCOM)}, pages={158--169}, year={2013}, url = {https://math.ryerson.ca/~pralat/papers/2013_broadcasting.pdf}, abstract = {The broadcasting problem asks for the fastest way of transmitting a message to all nodes of a communication network. We consider the problem in conflict-aware multi-channel networks. These networks can be modeled as undirected graphs in which each edge is labeled with a set of available channels to transmit data between its endpoints. Each node can send and receive data through any channel on its incident edges, with the restriction that it cannot successfully receive through a channel when multiple neighbors send data via that channel simultaneously. We present efficient algorithms as well as hardness results for the broadcasting problem on various network topologies. We propose polynomial time algorithms for optimal broadcasting in grids, and also for trees when there is only one channel on each edge. Nevertheless, we show that the problem is NP-hard for trees in general, as well as for complete graphs. In addition, we consider balanced complete graphs and propose a policy for assigning channels to these graphs. This policy, together with its embedded broadcasting schemes, result in fault-tolerant networks which have optimal broadcasting time. } } @inproceedings{NCA13, title={Data Partitioning for Video-on-Demand Services}, author={Lei, Bairong and Surya, Ivan and Kamali, Shahin and Daudjee, Khuzaima}, booktitle={Proc. 12th IEEE International Symposium on Network Computing and Applications (NCA)}, pages={49--54}, year={2013}, organization={IEEE}, url = {https://ieeexplore.ieee.org/document/6623640}, abstract = {Streaming video over the Internet has become a popular service that is projected to outstrip demand over most other real-time streaming services. 
Thus, it is essential to provide scalable server storage and services without which video-on-demand services will not be able to provide good performance in the face of increasing demand. In this paper, we study algorithms, and propose one, for simple partitioning video-on-demand storage to distribute content and workload among servers within a master-slave architecture. We show the effectiveness of the partitioning strategies by conducting experiments on an actual system to quantify trade-offs between the algorithms.} } @inproceedings{SIROCCO13, author = {Sushmita Gupta and Shahin Kamali and Alejandro Lopez{-}Ortiz}, title = {On Advice Complexity of the k-server Problem under Sparse Metrics}, booktitle = {Proc. 20th International Colloquium on Structural Information and Communication Complexity {(SIROCCO)}}, pages = {55--67}, year = {2013}, arxiv = {https://arxiv.org/abs/1305.2108}, abstract = {We consider the k-server problem under the advice model of computation when the underlying metric space is sparse. On one side, we introduce $\Theta(1)$-competitive algorithms for a wide range of sparse graphs, which require advice of (almost) linear size. Namely, we show that for graphs of size $N$ and treewidth $\alpha$, there is an online algorithm which receives $O{n (\log \alpha + \log \log N)}$ bits of advice and optimally serves a sequence of length $n$. We also show that if a graph admits a system of $\mu$ collective tree $(q,r)$-spanners, then there is a $(q+r)$-competitive algorithm which receives $\oh{n (\log \mu + \log \log N)}$ bits of advice. Among other results, this gives a $3$-competitive algorithm for planar graphs, provided with $\oh{n \log \log N}$ bits of advice. On the other side, we show that advice of size $\Omega(n)$ is required to obtain a $1$-competitive algorithm for sequences of size $n$ even for the 2-server problem on a path metric of size $N \geq 5$. 
Through another lower bound argument, we show that at least $\frac{n}{2}(\log \alpha- 1.22)$ bits of advice is required to obtain an optimal solution for metric spaces of treewidth $\alpha$, where $4 \leq \alpha < 2k$. } } @inproceedings{WAOA12, author = {Reza Dorrigiv and Robert Fraser and Meng He and Shahin Kamali and Akitoshi Kawamura and Alejandro Lopez{-}Ortiz and Diego Seco}, title = {On Minimum-and Maximum-Weight Minimum Spanning Trees with Neighborhoods}, booktitle = {Proc. 10th International Workshop on Approximation and Online Algorithms {(WAOA)}}, pages = {93--106}, year = {2012}, url = {https://web.cs.dal.ca/~mhe/publications/wowa12_spanningtree.pdf}, publisher={Springer US}, abstract = {We study optimization problems for the Euclidean minimum spanning tree (MST) on imprecise data. To model imprecision, we accept a set of disjoint disks in the plane as input. From each member of the set, one point must be selected, and the MST is computed over the set of selected points. We consider both minimizing and maximizing the weight of the MST over the input. The minimum weight version of the problem is known as the minimum spanning tree with neighborhoods (MSTN) problem, and the maximum weight version (max-MSTN) has not been studied previously to our knowledge. We provide deterministic and parameterized approximation algorithms for the max-MSTN problem, and a parameterized algorithm for the MSTN problem. Additionally, we present hardness of approximation proofs for both settings.} } @inproceedings{ICALP11, author = {Arash Farzan and Shahin Kamali}, title = {Compact Navigation and Distance Oracles for Graphs with Small Treewidth}, booktitle = {Proc. 
38th International Colloquium on Automata, Languages and Programming (ICALP)}, pages = {268--280}, year = {2011}, url = {https://cs.uwaterloo.ca/research/tr/2011/CS-2011-15.pdf}, publisher={Springer US}, abstract = {Given an unlabeled, unweighted, and undirected graph with $n$ vertices and small (but not necessarily constant) treewidth $k$, we consider the problem of preprocessing the graph to build space-efficient encodings (oracles) to perform various queries efficiently. We assume the word RAM model where the size of a word is $\omegah{\log n}$ bits. The first oracle, we present, is the navigation oracle which facilitates primitive navigation operations of adjacency, neighborhood, and degree queries. By way of an enumeration argument, which is of interest in its own right, we show the space requirement of the oracle is optimal to within lower order terms for all graphs with $n$ vertices and treewidth $k$. The oracle supports the mentioned queries all in constant worst-case time. The second oracle, we present, is an exact distance oracle which facilitates distance queries between any pair of vertices (i.e., an all-pairs shortest-path oracle). The space requirement of the oracle is also optimal to within lower order terms. Moreover, the distance queries perform in $\oh{k^3\log^3 k}$ time. Particularly, for the class of graphs of popular interest, graphs of bounded treewidth (where $k$ is constant), the distances are reported in constant worst-case time. } } @inproceedings{IPCCC11, title={Dynamic data allocation with replication in distributed systems}, author={Shahin Kamali and Pedram Ghodsnia and Khuzaima Daudjee}, booktitle={Proc. 
30th International Performance Computing and Communications Conference (IPCCC)}, pages={1--8}, year={2011}, organization={IEEE}, url = {https://ieeexplore.ieee.org/document/6108075}, abstract = {The goal of fragment allocation in distributed database systems is to place data fragments at sites so as to minimize the overall data transmission cost incurred to answer queries. We consider the problem of fragment allocation in lazily replicated systems and address both placement and replication issues in an integrated approach. While replication can improve performance via increased locality, excessive replication can incur extra overhead cost to maintain replicas. A comprehensive model that takes into account network topology, fragment correlation, and data access patterns is presented. Based on this model, we propose an algorithm to find near-optimal dynamic allocation solutions. Experimental results show the efficacy of the proposed solution.} } @inproceedings{SOFSEM10, title={Optimum broadcasting in complete weighted-vertex graphs}, author={Hovhannes Harutyunyan and Shahin Kamali}, booktitle={Proc. 36th International Conference on Current Trends in Theory and Practice of Informatics (SOFSEM)}, pages={489--502}, year={2010}, publisher={Springer Berlin/Heidelberg}, url = {https://link.springer.com/chapter/10.1007/978-3-642-11266-9_41}, abstract = {The weighted-vertex broadcast model is an extension of the original model of broadcasting to the graphs with weighted vertices. A vertex of weight w is supposed to wait for w time rounds before sending data to its neighbors; after this delay, in each round, it can inform one of its neighbors with no additional delay. We consider the problem in complete weighted-vertex graphs, and introduce a set of properties which describe a class of optimum solutions. 
Based on these, we present an algorithm which gives optimum broadcast schemes in polynomial time.} } @mastersthesis{Kamali08Masters, title={Broadcasting in Weighted-vertex Graphs}, author={Shahin Kamali} , year={2008}, school={Concordia University}, type = { M.Sc. Thesis}, organization = {Department of Computer Science and Software Engineering}, url = {https://spectrum.library.concordia.ca/id/eprint/976179/}, abstract = {In this thesis, we present a new model for information dissemination in communication networks. The model is defined on networks in which nodes are assigned some weights representing the internal delay they should pass before sending data to their neighbors. The new model, called weighted-vertex model, comes to have real world applications in parallel computation and satellite terrestrial networks. Broadcasting in weighted-vertex model is a generalized version of classical broadcasting problem, which is NP_Complete. The problem remains NP-Complete in some classes of weighed-vertex graphs. We show existence of approximation algorithms for the broadcasting problem in weighted vertex model, as well as better approximations for specific subclasses of weighted graphs. In addition we study broadcasting in complete weighted graphs and present an algorithm for finding the optimum solution in this case. Broadcasting in complete graphs with uniform weights is studied separately. Finally we introduce some heuristics for the problem and compare their performance using computer simulations.} } @inproceedings{ICPADS08, title={Efficient broadcasting in networks with weighted nodes}, author={Hovhannes Harutyunyan and Shahin Kamali}, booktitle={Proc. 14th IEEE International Conference on Parallel and Distributed Systems {(ICPADS)}}, pages={879--884}, year={2008}, organization={IEEE}, url = {https://ieeexplore.ieee.org/document/4724411}, abstract = {In this paper the problem of broadcasting in networks with weighted nodes is considered. 
This model is defined on networks in which nodes are assigned some weights representing the internal process or delay that they should perform before sending data to their neighboring nodes. This model has real world applications in parallel computation and satellite terrestrial networks. The problem is shown to be NP-complete. In this paper we present three algorithms to find near optimal broadcast time of an arbitrary network with weighted nodes. We also present our simulation results to compare the performance of these algorithms.} } @inproceedings{ISPA08, title={Broadcasting in weighted-vertex graphs}, author={Hovhannes Harutyunyan and Shahin Kamali}, booktitle={Proc. International Symposium on Parallel and Distributed Processing with Applications ({ISPA}) }, pages={301--307}, year={2008}, organization={IEEE}, url = {https://ieeexplore-ieee-org.uml.idm.oclc.org/document/4725161}, abstract = {In this paper a new model for information dissemination in communication network is presented. The model is defined on networks in which nodes are assigned some weights representing the internal delay they should pass before sending data to their neighbors. The new model, called weighted-vertex model, comes to have real world applications in parallel computation and satellite terrestrial networks. As a generalization of the classical model, optimum broadcasting in weighted-vertex model is NP_Hard. The problem remains NP_Hard in some classes of weighed-vertex graphs. We show existence of approximation algorithms for the broadcasting problem in weighted vertex model, as well as better approximations for specific subclasses of weighted graphs.} } @inproceedings{PDPTA08, author = {Hovhannes Harutyunyan and Shahin Kamali and Talin Moradian}, title = {Multi-Shared-Trees Based Multicasting in Mesh-Connected Networks}, booktitle = {Proc. 
International Conference on Parallel and Distributed Processing Techniques and Applications ({PDPTA})}, pages = {178--182}, year = {2008}, abstract = {Performance of multicomputers is a matter of underlying network communications such as multicast. Multicasting is an efficient information dissemination for a subscribed user group on networks. We study multicasting problem in meshconnected networks. Having a group of processors distributed on the mesh, the goal is to present a routing strategy such that every member of the group can effectively transmit data to the other members. The conventional strategies to the problem are source-based and shared-tree approaches, each having drawbacks in efficiency or scalability of large networks. To compromise between these two we use multi-shared tree approach. We apply a core-selection algorithm based on taxicab geometry to create a small number of distribution trees that are almost optimum with respect to multicast time and traffic.} } @incollection{ROBOT07, title={Positioning in Robots Soccer}, author={Hesam T. Dashti and Shahin Kamali and Nima Aghaeepour}, booktitle={Robotic soccer}, year={2007}, publisher={I-Tech Education and Publishing (book chapter)}, pages = {29--46}, url = {https://www.intechopen.com/chapters/519}, abstract = {In this chapter, the focus is on an important issue of robots’ decision-making process which is positioning. Positioning involves making the best decision for the agents who do not possess the ball regarding the team strategy and consequently finding the best target for them. We overview the importance of positioning, review the existing approaches for position, including strategic positioning, dynamic positioning, and positioning using machine learning. We particularly discuss a dynamic positioning approach that uses Voronoi diagrams and provide evidence for its efficiency and effectiveness. 
} } @inproceedings{ROBOCUP05, title={Dynamic positioning based on voronoi cells (DPVC)}, author={HesamAddin Torabi Dashti and Nima Aghaeepour and Sahar Asadi and Meysam Bastani and Zahra Delafkar and Fatemeh Miri Disfani and Serveh Mam Ghaderi and Shahin Kamali and Sepideh Pashami and Alireza Fotuhi Siahpirani}, booktitle={Robot Soccer World Cup}, pages={219--229}, year={2005}, organization={Springer, Berlin, Heidelberg}, url = {https://link.springer.com/chapter/10.1007/11780519_20}, abstract = {In this paper we are proposing an approach for flexible positioning of players in Soccer Simulation in a Multi-Agent environment. We introduce Dynamic Positioning based on Voronoi Cells (DPVC) as a new method for players’ positioning which uses Voronoi Diagram for distributing agents in the field. This method also uses Attraction Vectors that indicate agents’ tendency to specific objects in the field with regard to the game situation and players’ roles. Finally DPVC is compared with SBSP as the conventional method of positioning.} }